diff --git a/.gitignore b/.gitignore index 551b1b5361ce2..f483bc772a874 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ *.suo *.vcxproj.user *.patch +*.diff .idea .svn .classpath @@ -50,16 +51,12 @@ patchprocess/ .history/ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log - -# Ignore files generated by HDDS acceptance tests. -hadoop-ozone/acceptance-test/docker-compose.log -hadoop-ozone/acceptance-test/junit-results.xml +phantomjsdriver.log #robotframework outputs log.html output.xml report.html -hadoop-hdds/docs/public .mvn diff --git a/BUILDING.txt b/BUILDING.txt index d54ce83183846..01446fdb68da7 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -63,19 +63,16 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop: * Native libraries $ sudo apt-get -y install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev libsasl2-dev * Protocol Buffers 3.7.1 (required to build native code) - $ mkdir -p /opt/protobuf-3.7-src \ - && curl -L -s -S \ - https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \ - -o /opt/protobuf-3.7.1.tar.gz \ - && tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C /opt/protobuf-3.7-src \ - && cd /opt/protobuf-3.7-src \ - && ./configure\ - && make install \ - && rm -rf /opt/protobuf-3.7-src + $ curl -L -s -S https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz -o protobuf-3.7.1.tar.gz + $ mkdir protobuf-3.7-src + $ tar xzf protobuf-3.7.1.tar.gz --strip-components 1 -C protobuf-3.7-src && cd protobuf-3.7-src + $ ./configure + $ make -j$(nproc) + $ sudo make install Optional packages: -* Snappy compression +* Snappy compression (only used for hadoop-mapreduce-client-nativetask) $ sudo apt-get install snappy libsnappy-dev * Intel ISA-L library for erasure coding Please refer to https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version @@ -96,7 +93,7 @@ Maven main modules: - hadoop-project (Parent POM for all Hadoop Maven modules. ) (All plugins & dependencies versions are defined here.) - hadoop-project-dist (Parent POM for modules that generate distributions.) - - hadoop-annotations (Generates the Hadoop doclet used to generated the Javadocs) + - hadoop-annotations (Generates the Hadoop doclet used to generate the Javadocs) - hadoop-assemblies (Maven assemblies used by the different modules) - hadoop-maven-plugins (Maven plugins used in project) - hadoop-build-tools (Build tools like checkstyle, etc.) @@ -113,7 +110,7 @@ Maven main modules: ---------------------------------------------------------------------------------- Where to run Maven from? - It can be run from any module. The only catch is that if not run from utrunk + It can be run from any module. The only catch is that if not run from trunk all modules that are not part of the build run must be installed in the local Maven cache or available in a Maven repository. 
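  For example, a minimal sketch of building a single module after trunk has been
  installed into the local Maven cache (the module path below is only an
  illustration; any module works the same way):

    $ mvn install -DskipTests
    $ cd hadoop-common-project/hadoop-common
    $ mvn compile -Pnative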
@@ -124,7 +121,7 @@ Maven build goals: * Compile : mvn compile [-Pnative] * Run tests : mvn test [-Pnative] [-Pshelltest] * Create JAR : mvn package - * Run findbugs : mvn compile findbugs:findbugs + * Run spotbugs : mvn compile spotbugs:spotbugs * Run checkstyle : mvn compile checkstyle:checkstyle * Install JAR in M2 cache : mvn install * Deploy JAR to Maven repo : mvn deploy @@ -161,14 +158,14 @@ Maven build goals: Snappy is a compression library that can be utilized by the native code. It is currently an optional component, meaning that Hadoop can be built with - or without this dependency. + or without this dependency. Snappy library as optional dependency is only + used for hadoop-mapreduce-client-nativetask. * Use -Drequire.snappy to fail the build if libsnappy.so is not found. If this option is not specified and the snappy library is missing, we silently build a version of libhadoop.so that cannot make use of snappy. This option is recommended if you plan on making use of snappy and want to get more repeatable builds. - * Use -Dsnappy.prefix to specify a nonstandard location for the libsnappy header files and library files. You do not need this option if you have installed snappy using a package manager. diff --git a/LICENSE-binary b/LICENSE-binary index 921d486b23be2..0aac03bb00000 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -215,7 +215,7 @@ com.aliyun:aliyun-java-sdk-ecs:4.2.0 com.aliyun:aliyun-java-sdk-ram:3.0.0 com.aliyun:aliyun-java-sdk-sts:3.0.0 com.aliyun.oss:aliyun-sdk-oss:3.4.1 -com.amazonaws:aws-java-sdk-bundle:1.11.563 +com.amazonaws:aws-java-sdk-bundle:1.11.901 com.cedarsoftware:java-util:1.9.0 com.cedarsoftware:json-io:2.5.1 com.fasterxml.jackson.core:jackson-annotations:2.9.9 @@ -225,7 +225,7 @@ com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.9.9 com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.9.9 com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.9.9 com.fasterxml.uuid:java-uuid-generator:3.1.4 -com.fasterxml.woodstox:woodstox-core:5.0.3 +com.fasterxml.woodstox:woodstox-core:5.3.0 com.github.davidmoten:rxjava-extras:0.8.0.17 com.github.stephenc.jcip:jcip-annotations:1.0-1 com.google:guice:4.0 @@ -366,7 +366,7 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanage com.github.luben:zstd-jni:1.4.3-1 dnsjava:dnsjava:2.1.7 -org.codehaus.woodstox:stax2-api:3.1.4 +org.codehaus.woodstox:stax2-api:4.2.1 BSD 3-Clause @@ -385,7 +385,7 @@ com.google.protobuf:protobuf-java:3.6.1 com.google.re2j:re2j:1.1 com.jcraft:jsch:0.1.54 com.thoughtworks.paranamer:paranamer:2.3 -javax.activation:javax.activation-api:1.2.0 +jakarta.activation:jakarta.activation-api:1.2.1 org.fusesource.leveldbjni:leveldbjni-all:1.8 org.jline:jline:3.9.0 org.hamcrest:hamcrest-core:1.3 @@ -468,8 +468,8 @@ com.microsoft.azure:azure-cosmosdb-gateway:2.4.5 com.microsoft.azure:azure-data-lake-store-sdk:2.3.3 com.microsoft.azure:azure-keyvault-core:1.0.0 com.microsoft.sqlserver:mssql-jdbc:6.2.1.jre7 -org.bouncycastle:bcpkix-jdk15on:1.60 -org.bouncycastle:bcprov-jdk15on:1.60 +org.bouncycastle:bcpkix-jdk15on:1.68 +org.bouncycastle:bcprov-jdk15on:1.68 org.checkerframework:checker-qual:2.5.2 org.codehaus.mojo:animal-sniffer-annotations:1.17 org.jruby.jcodings:jcodings:1.0.13 diff --git a/Jenkinsfile b/dev-support/Jenkinsfile similarity index 83% rename from Jenkinsfile rename to dev-support/Jenkinsfile index 17e74661fc471..a4e5677e4aeb0 100644 --- a/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -23,7 +23,7 @@ pipeline { options { 
buildDiscarder(logRotator(numToKeepStr: '5')) - timeout (time: 5, unit: 'HOURS') + timeout (time: 20, unit: 'HOURS') timestamps() checkoutToSubdirectory('src') } @@ -35,7 +35,7 @@ pipeline { DOCKERFILE = "${SOURCEDIR}/dev-support/docker/Dockerfile" YETUS='yetus' // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='rel/0.11.1' + YETUS_VERSION='11eb9b09786e401fbdeaa3be83a19a4066fd7813' } parameters { @@ -61,7 +61,7 @@ pipeline { steps { withCredentials( [usernamePassword(credentialsId: 'apache-hadoop-at-github.com', - passwordVariable: 'GITHUB_PASSWORD', + passwordVariable: 'GITHUB_TOKEN', usernameVariable: 'GITHUB_USER'), usernamePassword(credentialsId: 'hadoopqa-at-asf-jira', passwordVariable: 'JIRA_PASSWORD', @@ -96,8 +96,8 @@ pipeline { YETUS_ARGS+=("--basedir=${WORKSPACE}/${SOURCEDIR}") # our project defaults come from a personality file - # which will get loaded automatically by setting the project name YETUS_ARGS+=("--project=hadoop") + YETUS_ARGS+=("--personality=${WORKSPACE}/${SOURCEDIR}/dev-support/bin/hadoop.sh") # lots of different output formats YETUS_ARGS+=("--brief-report-file=${WORKSPACE}/${PATCHDIR}/brief.txt") @@ -105,8 +105,7 @@ pipeline { YETUS_ARGS+=("--html-report-file=${WORKSPACE}/${PATCHDIR}/report.html") # enable writing back to Github - YETUS_ARGS+=(--github-password="${GITHUB_PASSWORD}") - YETUS_ARGS+=(--github-user=${GITHUB_USER}) + YETUS_ARGS+=(--github-token="${GITHUB_TOKEN}") # enable writing back to ASF JIRA YETUS_ARGS+=(--jira-password="${JIRA_PASSWORD}") @@ -119,13 +118,13 @@ pipeline { # changing these to higher values may cause problems # with other jobs on systemd-enabled machines YETUS_ARGS+=("--proclimit=5500") - YETUS_ARGS+=("--dockermemlimit=20g") + YETUS_ARGS+=("--dockermemlimit=22g") - # -1 findbugs issues that show up prior to the patch being applied - YETUS_ARGS+=("--findbugs-strict-precheck") + # -1 spotbugs issues that show up prior to the patch being applied + YETUS_ARGS+=("--spotbugs-strict-precheck") # rsync these files back into the archive dir - YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,findbugsXml.xml") + YETUS_ARGS+=("--archive-list=checkstyle-errors.xml,spotbugsXml.xml") # URL for user-side presentation in reports and such to our artifacts # (needs to match the archive bits below) @@ -134,9 +133,6 @@ pipeline { # plugins to enable YETUS_ARGS+=("--plugins=all") - # use Hadoop's bundled shelldocs - YETUS_ARGS+=("--shelldocs=/testptch/hadoop/dev-support/bin/shelldocs") - # don't let these tests cause -1s because we aren't really paying that # much attention to them YETUS_ARGS+=("--tests-filter=checkstyle") @@ -145,6 +141,7 @@ pipeline { # Dockerfile since we don't want to use the auto-pulled version. 
YETUS_ARGS+=("--docker") YETUS_ARGS+=("--dockerfile=${DOCKERFILE}") + YETUS_ARGS+=("--mvn-custom-repos") # effectively treat dev-suport as a custom maven module YETUS_ARGS+=("--skip-dirs=dev-support") @@ -152,7 +149,11 @@ pipeline { # help keep the ASF boxes clean YETUS_ARGS+=("--sentinel") - # use emoji vote so it is easier to find the broken line + # custom javadoc goals + YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork") + + # write Yetus report as GitHub comment (YETUS-1102) + YETUS_ARGS+=("--github-write-comment") YETUS_ARGS+=("--github-use-emoji-vote") "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" @@ -166,6 +167,19 @@ pipeline { post { always { script { + // Publish status if it was missed (YETUS-1059) + withCredentials( + [usernamePassword(credentialsId: '683f5dcf-5552-4b28-9fb1-6a6b77cf53dd', + passwordVariable: 'GITHUB_TOKEN', + usernameVariable: 'GITHUB_USER')]) { + sh '''#!/usr/bin/env bash + YETUS_ARGS+=("--github-token=${GITHUB_TOKEN}") + YETUS_ARGS+=("--patch-dir=${WORKSPACE}/${PATCHDIR}") + TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/github-status-recovery.sh" + /usr/bin/env bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}" ${EXTRA_ARGS} || true + ''' + } + // Yetus output archiveArtifacts "${env.PATCHDIR}/**" // Publish the HTML report so that it can be looked at diff --git a/dev-support/bin/checkcompatibility.py b/dev-support/bin/checkcompatibility.py index ad1e9cbe47ff2..3db36154ef9c5 100755 --- a/dev-support/bin/checkcompatibility.py +++ b/dev-support/bin/checkcompatibility.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -30,33 +30,16 @@ import shutil import subprocess import sys -import urllib2 -try: - import argparse -except ImportError: - sys.stderr.write("Please install argparse, e.g. via `pip install argparse`.") - sys.exit(2) +import urllib.request +import argparse # Various relative paths REPO_DIR = os.getcwd() def check_output(*popenargs, **kwargs): - r"""Run command with arguments and return its output as a byte string. - Backported from Python 2.7 as it's implemented as pure python on stdlib. - >>> check_output(['/usr/bin/python', '--version']) - Python 2.6.2 - """ - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, _ = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - error = subprocess.CalledProcessError(retcode, cmd) - error.output = output - raise error - return output + """ Run command with arguments and return its output as a string. """ + return subprocess.check_output(*popenargs, **kwargs, encoding='utf-8') + def get_repo_dir(): """ Return the path to the top of the repo. 
""" @@ -139,7 +122,7 @@ def checkout_java_acc(force): url = "https://github.com/lvc/japi-compliance-checker/archive/1.8.tar.gz" scratch_dir = get_scratch_dir() path = os.path.join(scratch_dir, os.path.basename(url)) - jacc = urllib2.urlopen(url) + jacc = urllib.request.urlopen(url) with open(path, 'wb') as w: w.write(jacc.read()) @@ -194,7 +177,7 @@ def run_java_acc(src_name, src_jars, dst_name, dst_jars, annotations): annotations_path = os.path.join(get_scratch_dir(), "annotations.txt") with file(annotations_path, "w") as f: for ann in annotations: - print >>f, ann + print(ann, file=f) args += ["-annotations-list", annotations_path] subprocess.check_call(args) @@ -264,8 +247,8 @@ def main(): parser.add_argument("--skip-build", action="store_true", help="Skip building the projects.") - parser.add_argument("src_rev", nargs=1, help="Source revision.") - parser.add_argument("dst_rev", nargs="?", default="HEAD", + parser.add_argument("src_rev", nargs=1, type=str, help="Source revision.") + parser.add_argument("dst_rev", nargs="?", type=str, default="HEAD", help="Destination revision. " + "If not specified, will use HEAD.") diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release index f4851d1c76498..39a5d0d319837 100755 --- a/dev-support/bin/create-release +++ b/dev-support/bin/create-release @@ -651,10 +651,12 @@ function signartifacts big_console_header "Signing the release" - for i in ${ARTIFACTS_DIR}/*; do + run cd "${ARTIFACTS_DIR}" + for i in *; do ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}" sha512sum --tag "${i}" > "${i}.sha512" done + run cd "${BASEDIR}" if [[ "${ASFRELEASE}" = true ]]; then echo "Fetching the Apache Hadoop KEYS file..." diff --git a/dev-support/bin/dist-copynativelibs b/dev-support/bin/dist-copynativelibs index ffc82b8fb1b05..95de186e7e729 100755 --- a/dev-support/bin/dist-copynativelibs +++ b/dev-support/bin/dist-copynativelibs @@ -111,9 +111,6 @@ for i in "$@"; do --openssllibbundle=*) OPENSSLLIBBUNDLE=${i#*=} ;; - --snappybinbundle=*) - SNAPPYBINBUNDLE=${i#*=} - ;; --snappylib=*) SNAPPYLIB=${i#*=} ;; @@ -167,7 +164,7 @@ fi # Windows doesn't have a LIB_DIR, everything goes into bin -if [[ -d "${BIN_DIR}" ]] ; then +if [[ -d "${BIN_DIR}" && $(ls -A "${BIN_DIR}") ]] ; then mkdir -p "${TARGET_BIN_DIR}" cd "${BIN_DIR}" || exit 1 ${TAR} ./* | (cd "${TARGET_BIN_DIR}"/ || exit 1; ${UNTAR}) @@ -176,8 +173,6 @@ if [[ -d "${BIN_DIR}" ]] ; then exit 1 fi - bundle_native_bin "${SNAPPYBINBUNDLE}" "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}" - bundle_native_bin "${ZSTDBINBUNDLE}" "${ZSTDLIBBUNDLE}" "zstd.lib" "zstd" "${ZSTDLIB}" bundle_native_bin "${OPENSSLBINBUNDLE}" "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}" diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching index 20e8cf27805e2..d4bfd8aaada3b 100755 --- a/dev-support/bin/dist-layout-stitching +++ b/dev-support/bin/dist-layout-stitching @@ -21,9 +21,6 @@ VERSION=$1 # project.build.directory BASEDIR=$2 -#hdds.version -HDDS_VERSION=$3 - function run() { declare res diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh new file mode 100755 index 0000000000000..beebea8c97f6b --- /dev/null +++ b/dev-support/bin/hadoop.sh @@ -0,0 +1,563 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# SHELLDOC-IGNORE +# +# Override these to match Apache Hadoop's requirements +personality_plugins "all,-ant,-gradle,-scalac,-scaladoc" + +## @description Globals specific to this personality +## @audience private +## @stability evolving +function personality_globals +{ + # shellcheck disable=SC2034 + BUILDTOOL=maven + #shellcheck disable=SC2034 + PATCH_BRANCH_DEFAULT=trunk + #shellcheck disable=SC2034 + PATCH_NAMING_RULE="https://cwiki.apache.org/confluence/display/HADOOP/How+To+Contribute" + #shellcheck disable=SC2034 + JIRA_ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$' + #shellcheck disable=SC2034 + GITHUB_REPO_DEFAULT="apache/hadoop" + + HADOOP_HOMEBREW_DIR=${HADOOP_HOMEBREW_DIR:-$(brew --prefix 2>/dev/null)} + if [[ -z "${HADOOP_HOMEBREW_DIR}" ]]; then + HADOOP_HOMEBREW_DIR=/usr/local + fi +} + +function personality_parse_args +{ + declare i + + for i in "$@"; do + case ${i} in + --hadoop-isal-prefix=*) + delete_parameter "${i}" + ISAL_HOME=${i#*=} + ;; + --hadoop-openssl-prefix=*) + delete_parameter "${i}" + OPENSSL_HOME=${i#*=} + ;; + --hadoop-snappy-prefix=*) + delete_parameter "${i}" + SNAPPY_HOME=${i#*=} + ;; + esac + done +} + +## @description Calculate the actual module ordering +## @audience private +## @stability evolving +## @param ordering +function hadoop_order +{ + declare ordering=$1 + declare hadoopm + + if [[ ${ordering} = normal ]]; then + hadoopm="${CHANGED_MODULES[*]}" + elif [[ ${ordering} = union ]]; then + hadoopm="${CHANGED_UNION_MODULES}" + elif [[ ${ordering} = mvnsrc ]]; then + hadoopm="${MAVEN_SRC_MODULES[*]}" + elif [[ ${ordering} = mvnsrctest ]]; then + hadoopm="${MAVEN_SRCTEST_MODULES[*]}" + else + hadoopm="${ordering}" + fi + echo "${hadoopm}" +} + +## @description Determine if it is safe to run parallel tests +## @audience private +## @stability evolving +## @param ordering +function hadoop_test_parallel +{ + if [[ -f "${BASEDIR}/pom.xml" ]]; then + HADOOP_VERSION=$(grep '' "${BASEDIR}/pom.xml" \ + | head -1 \ + | "${SED}" -e 's|^ *||' -e 's|.*$||' \ + | cut -f1 -d- ) + export HADOOP_VERSION + else + return 1 + fi + + hmajor=${HADOOP_VERSION%%\.*} + hmajorminor=${HADOOP_VERSION%\.*} + hminor=${hmajorminor##*\.} + # ... 
and just for reference + #hmicro=${HADOOP_VERSION##*\.} + + # Apache Hadoop v2.8.0 was the first one to really + # get working parallel unit tests + if [[ ${hmajor} -lt 3 && ${hminor} -lt 8 ]]; then + return 1 + fi + + return 0 +} + +## @description Install extra modules for unit tests +## @audience private +## @stability evolving +## @param ordering +function hadoop_unittest_prereqs +{ + declare input=$1 + declare mods + declare need_common=0 + declare building_common=0 + declare module + declare flags + declare fn + + # prior to running unit tests, hdfs needs libhadoop.so built + # if we're building root, then this extra work is moot + + #shellcheck disable=SC2086 + mods=$(hadoop_order ${input}) + + for module in ${mods}; do + if [[ ${module} = hadoop-hdfs-project* ]]; then + need_common=1 + elif [[ ${module} = hadoop-common-project/hadoop-common + || ${module} = hadoop-common-project ]]; then + building_common=1 + elif [[ ${module} = . ]]; then + return + fi + done + + # Windows builds *ALWAYS* need hadoop-common compiled + case ${OSTYPE} in + Windows_NT|CYGWIN*|MINGW*|MSYS*) + need_common=1 + ;; + esac + + if [[ ${need_common} -eq 1 + && ${building_common} -eq 0 ]]; then + echo "unit test pre-reqs:" + module="hadoop-common-project/hadoop-common" + fn=$(module_file_fragment "${module}") + flags="$(hadoop_native_flags) $(yarn_ui2_flag)" + pushd "${BASEDIR}/${module}" >/dev/null || return 1 + # shellcheck disable=SC2086 + echo_and_redirect "${PATCH_DIR}/maven-unit-prereq-${fn}-install.txt" \ + "${MAVEN}" "${MAVEN_ARGS[@]}" install -DskipTests ${flags} + popd >/dev/null || return 1 + fi +} + +## @description Calculate the flags/settings for yarn-ui v2 build +## @description based upon the OS +## @audience private +## @stability evolving +function yarn_ui2_flag +{ + + if [[ ${BUILD_NATIVE} != true ]]; then + return + fi + + # Now it only tested on Linux/OSX, don't enable the profile on + # windows until it get verified + case ${OSTYPE} in + Linux) + # shellcheck disable=SC2086 + echo -Pyarn-ui + ;; + Darwin) + echo -Pyarn-ui + ;; + *) + # Do nothing + ;; + esac +} + +## @description Calculate the flags/settings for native code +## @description based upon the OS +## @audience private +## @stability evolving +function hadoop_native_flags +{ + if [[ ${BUILD_NATIVE} != true ]]; then + return + fi + + declare -a args + + # Based upon HADOOP-11937 + # + # Some notes: + # + # - getting fuse to compile on anything but Linux + # is always tricky. + # - Darwin assumes homebrew is in use. + # - HADOOP-12027 required for bzip2 on OS X. + # - bzip2 is broken in lots of places + # (the shared library is considered experimental) + # e.g, HADOOP-12027 for OS X. 
so no -Drequire.bzip2 + # + + args=("-Drequire.test.libhadoop") + + if [[ -d "${ISAL_HOME}/include" ]]; then + args=("${args[@]}" "-Disal.prefix=${ISAL_HOME}") + fi + + if [[ -d "${OPENSSL_HOME}/include" ]]; then + args=("${args[@]}" "-Dopenssl.prefix=${OPENSSL_HOME}") + elif [[ -d "${HADOOP_HOMEBREW_DIR}/opt/openssl/" ]]; then + args=("${args[@]}" "-Dopenssl.prefix=${HADOOP_HOMEBREW_DIR}/opt/openssl/") + fi + + if [[ -d "${SNAPPY_HOME}/include" ]]; then + args=("${args[@]}" "-Dsnappy.prefix=${SNAPPY_HOME}") + elif [[ -d "${HADOOP_HOMEBREW_DIR}/include/snappy.h" ]]; then + args=("${args[@]}" "-Dsnappy.prefix=${HADOOP_HOMEBREW_DIR}/opt/snappy") + fi + + case ${OSTYPE} in + Linux) + # shellcheck disable=SC2086 + echo \ + -Pnative \ + -Drequire.fuse \ + -Drequire.openssl \ + -Drequire.snappy \ + -Drequire.valgrind \ + -Drequire.zstd \ + "${args[@]}" + ;; + Darwin) + echo \ + "${args[@]}" \ + -Pnative \ + -Drequire.snappy \ + -Drequire.openssl + ;; + Windows_NT|CYGWIN*|MINGW*|MSYS*) + echo \ + "${args[@]}" \ + -Drequire.snappy -Drequire.openssl -Pnative-win + ;; + *) + echo \ + "${args[@]}" + ;; + esac +} + +## @description Queue up modules for this personality +## @audience private +## @stability evolving +## @param repostatus +## @param testtype +function personality_modules +{ + declare repostatus=$1 + declare testtype=$2 + declare extra="" + declare ordering="normal" + declare needflags=false + declare foundbats=false + declare flags + declare fn + declare i + declare hadoopm + + yetus_debug "Personality: ${repostatus} ${testtype}" + + clear_personality_queue + + case ${testtype} in + asflicense) + # this is very fast and provides the full path if we do it from + # the root of the source + personality_enqueue_module . + return + ;; + checkstyle) + ordering="union" + extra="-DskipTests" + ;; + compile) + ordering="union" + extra="-DskipTests" + needflags=true + + # if something in common changed, we build the whole world + if [[ "${CHANGED_MODULES[*]}" =~ hadoop-common ]]; then + yetus_debug "hadoop personality: javac + hadoop-common = ordering set to . " + ordering="." + fi + ;; + distclean) + ordering="." + extra="-DskipTests" + ;; + javadoc) + if [[ "${CHANGED_MODULES[*]}" =~ \. ]]; then + ordering=. + fi + + if [[ "${repostatus}" = patch && "${BUILDMODE}" = patch ]]; then + echo "javadoc pre-reqs:" + for i in hadoop-project \ + hadoop-common-project/hadoop-annotations; do + fn=$(module_file_fragment "${i}") + pushd "${BASEDIR}/${i}" >/dev/null || return 1 + echo "cd ${i}" + echo_and_redirect "${PATCH_DIR}/maven-${fn}-install.txt" \ + "${MAVEN}" "${MAVEN_ARGS[@]}" install + popd >/dev/null || return 1 + done + fi + extra="-Pdocs -DskipTests" + ;; + mvneclipse) + if [[ "${CHANGED_MODULES[*]}" =~ \. ]]; then + ordering=. + fi + ;; + mvninstall) + extra="-DskipTests" + if [[ "${repostatus}" = branch || "${BUILDMODE}" = full ]]; then + ordering=. + fi + ;; + mvnsite) + if [[ "${CHANGED_MODULES[*]}" =~ \. ]]; then + ordering=. + fi + ;; + unit) + if [[ "${BUILDMODE}" = full ]]; then + ordering=mvnsrc + elif [[ "${CHANGED_MODULES[*]}" =~ \. ]]; then + ordering=. + fi + + if [[ ${TEST_PARALLEL} = "true" ]] ; then + if hadoop_test_parallel; then + extra="-Pparallel-tests" + if [[ -n ${TEST_THREADS:-} ]]; then + extra="${extra} -DtestsThreadCount=${TEST_THREADS}" + fi + fi + fi + needflags=true + hadoop_unittest_prereqs "${ordering}" + + if ! verify_needed_test javac; then + yetus_debug "hadoop: javac not requested" + if ! 
verify_needed_test native; then + yetus_debug "hadoop: native not requested" + yetus_debug "hadoop: adding -DskipTests to unit test" + extra="-DskipTests" + fi + fi + + for i in "${CHANGED_FILES[@]}"; do + if [[ "${i}" =~ \.bats ]]; then + foundbats=true + fi + done + + if ! verify_needed_test shellcheck && [[ ${foundbats} = false ]]; then + yetus_debug "hadoop: NO shell code change detected; disabling shelltest profile" + extra="${extra} -P!shelltest" + else + extra="${extra} -Pshelltest" + fi + ;; + *) + extra="-DskipTests" + ;; + esac + + if [[ ${needflags} = true ]]; then + flags="$(hadoop_native_flags) $(yarn_ui2_flag)" + extra="${extra} ${flags}" + fi + + extra="-Ptest-patch ${extra}" + for module in $(hadoop_order ${ordering}); do + # shellcheck disable=SC2086 + personality_enqueue_module ${module} ${extra} + done +} + +## @description Add tests based upon personality needs +## @audience private +## @stability evolving +## @param filename +function personality_file_tests +{ + declare filename=$1 + + yetus_debug "Using Hadoop-specific personality_file_tests" + + if [[ ${filename} =~ src/main/webapp ]]; then + yetus_debug "tests/webapp: ${filename}" + add_test shadedclient + elif [[ ${filename} =~ \.sh + || ${filename} =~ \.cmd + || ${filename} =~ src/scripts + || ${filename} =~ src/test/scripts + || ${filename} =~ src/main/bin + || ${filename} =~ shellprofile\.d + || ${filename} =~ src/main/conf + ]]; then + yetus_debug "tests/shell: ${filename}" + add_test mvnsite + add_test unit + elif [[ ${filename} =~ \.md$ + || ${filename} =~ \.md\.vm$ + || ${filename} =~ src/site + ]]; then + yetus_debug "tests/site: ${filename}" + add_test mvnsite + elif [[ ${filename} =~ \.c$ + || ${filename} =~ \.cc$ + || ${filename} =~ \.h$ + || ${filename} =~ \.hh$ + || ${filename} =~ \.proto$ + || ${filename} =~ \.cmake$ + || ${filename} =~ CMakeLists.txt + ]]; then + yetus_debug "tests/units: ${filename}" + add_test compile + add_test cc + add_test mvnsite + add_test javac + add_test unit + elif [[ ${filename} =~ build.xml$ + || ${filename} =~ pom.xml$ + || ${filename} =~ \.java$ + || ${filename} =~ src/main + ]]; then + yetus_debug "tests/javadoc+units: ${filename}" + add_test compile + add_test javac + add_test javadoc + add_test mvninstall + add_test mvnsite + add_test unit + add_test shadedclient + fi + + # if we change anything in here, e.g. 
the test scripts + # then run the client artifact tests + if [[ ${filename} =~ hadoop-client-modules ]]; then + add_test shadedclient + fi + + if [[ ${filename} =~ src/test ]]; then + yetus_debug "tests: src/test" + add_test unit + fi + + if [[ ${filename} =~ \.java$ ]]; then + add_test spotbugs + fi +} + +## @description Image to print on success +## @audience private +## @stability evolving +function hadoop_console_success +{ + printf "IF9fX19fX19fX18gCjwgU3VjY2VzcyEgPgogLS0tLS0tLS0tLSAKIFwgICAg"; + printf "IC9cICBfX18gIC9cCiAgXCAgIC8vIFwvICAgXC8gXFwKICAgICAoKCAgICBP"; + printf "IE8gICAgKSkKICAgICAgXFwgLyAgICAgXCAvLwogICAgICAgXC8gIHwgfCAg"; + printf "XC8gCiAgICAgICAgfCAgfCB8ICB8ICAKICAgICAgICB8ICB8IHwgIHwgIAog"; + printf "ICAgICAgIHwgICBvICAgfCAgCiAgICAgICAgfCB8ICAgfCB8ICAKICAgICAg"; + printf "ICB8bXwgICB8bXwgIAo" +} + +################################################### +# Hadoop project specific check of IT for shaded artifacts + +add_test_type shadedclient + +## @description check for test modules and add test/plugins as needed +## @audience private +## @stability evolving +function shadedclient_initialize +{ + maven_add_install shadedclient +} + +## @description build client facing shaded artifacts and test them +## @audience private +## @stability evolving +## @param repostatus +function shadedclient_rebuild +{ + declare repostatus=$1 + declare logfile="${PATCH_DIR}/${repostatus}-shadedclient.txt" + declare module + declare -a modules=() + + if [[ ${OSTYPE} = Windows_NT || + ${OSTYPE} =~ ^CYGWIN.* || + ${OSTYPE} =~ ^MINGW32.* || + ${OSTYPE} =~ ^MSYS.* ]]; then + echo "hadoop personality: building on windows, skipping check of client artifacts." + return 0 + fi + + yetus_debug "hadoop personality: seeing if we need the test of client artifacts." + for module in hadoop-client-modules/hadoop-client-check-invariants \ + hadoop-client-modules/hadoop-client-check-test-invariants \ + hadoop-client-modules/hadoop-client-integration-tests; do + if [ -d "${module}" ]; then + yetus_debug "hadoop personality: test module '${module}' is present." + modules+=(-pl "${module}") + fi + done + if [ ${#modules[@]} -eq 0 ]; then + echo "hadoop personality: no test modules present, skipping check of client artifacts." + return 0 + fi + + big_console_header "Checking client artifacts on ${repostatus}" + + echo_and_redirect "${logfile}" \ + "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \ + "${modules[@]}" \ + -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true + + count=$("${GREP}" -c '\[ERROR\]' "${logfile}") + if [[ ${count} -gt 0 ]]; then + add_vote_table -1 shadedclient "${repostatus} has errors when building and testing our client artifacts." + return 1 + fi + + add_vote_table +1 shadedclient "${repostatus} has no errors when building and testing our client artifacts." + return 0 +} diff --git a/dev-support/bin/test-patch b/dev-support/bin/test-patch index 8ff8119b3e086..5faf472d325e8 100755 --- a/dev-support/bin/test-patch +++ b/dev-support/bin/test-patch @@ -15,4 +15,4 @@ # limitations under the License. 
BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P) -exec "${BINDIR}/yetus-wrapper" test-patch --project=hadoop --skip-dir=dev-support "$@" +exec "${BINDIR}/yetus-wrapper" test-patch --project=hadoop --skip-dirs=dev-support "$@" diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper index b0f71f105d85e..8532d1749701b 100755 --- a/dev-support/bin/yetus-wrapper +++ b/dev-support/bin/yetus-wrapper @@ -77,7 +77,7 @@ WANTED="$1" shift ARGV=("$@") -HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.10.0} +HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.13.0} BIN=$(yetus_abs "${BASH_SOURCE-$0}") BINDIR=$(dirname "${BIN}") @@ -144,7 +144,7 @@ else exit 1 fi -if [[ -n "${GPGBIN}" ]]; then +if [[ -n "${GPGBIN}" && ! "${HADOOP_SKIP_YETUS_VERIFICATION}" = true ]]; then if ! mkdir -p .gpg; then yetus_error "ERROR: yetus-dl: Unable to create ${HADOOP_PATCHPROCESS}/.gpg" exit 1 diff --git a/dev-support/determine-flaky-tests-hadoop.py b/dev-support/determine-flaky-tests-hadoop.py deleted file mode 100755 index 8644299bba4a2..0000000000000 --- a/dev-support/determine-flaky-tests-hadoop.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Given a jenkins test job, this script examines all runs of the job done -# within specified period of time (number of days prior to the execution -# time of this script), and reports all failed tests. -# -# The output of this script includes a section for each run that has failed -# tests, with each failed test name listed. -# -# More importantly, at the end, it outputs a summary section to list all failed -# tests within all examined runs, and indicate how many runs a same test -# failed, and sorted all failed tests by how many runs each test failed. -# -# This way, when we see failed tests in PreCommit build, we can quickly tell -# whether a failed test is a new failure, or it failed before and how often it -# failed, so to have idea whether it may just be a flaky test. -# -# Of course, to be 100% sure about the reason of a test failure, closer look -# at the failed test for the specific run is necessary. 
-# -import sys -import platform -sysversion = sys.hexversion -onward30 = False -if sysversion < 0x020600F0: - sys.exit("Minimum supported python version is 2.6, the current version is " + - "Python" + platform.python_version()) - -if sysversion == 0x030000F0: - sys.exit("There is a known bug with Python" + platform.python_version() + - ", please try a different version"); - -if sysversion < 0x03000000: - import urllib2 -else: - onward30 = True - import urllib.request - -import datetime -import json as simplejson -import logging -from optparse import OptionParser -import time - -# Configuration -DEFAULT_JENKINS_URL = "https://builds.apache.org" -DEFAULT_JOB_NAME = "Hadoop-Common-trunk" -DEFAULT_NUM_PREVIOUS_DAYS = 14 -DEFAULT_TOP_NUM_FAILED_TEST = -1 - -SECONDS_PER_DAY = 86400 - -# total number of runs to examine -numRunsToExamine = 0 - -#summary mode -summary_mode = False - -#total number of errors -error_count = 0 - -""" Parse arguments """ -def parse_args(): - parser = OptionParser() - parser.add_option("-J", "--jenkins-url", type="string", - dest="jenkins_url", help="Jenkins URL", - default=DEFAULT_JENKINS_URL) - parser.add_option("-j", "--job-name", type="string", - dest="job_name", help="Job name to look at", - default=DEFAULT_JOB_NAME) - parser.add_option("-n", "--num-days", type="int", - dest="num_prev_days", help="Number of days to examine", - default=DEFAULT_NUM_PREVIOUS_DAYS) - parser.add_option("-t", "--top", type="int", - dest="num_failed_tests", - help="Summary Mode, only show top number of failed tests", - default=DEFAULT_TOP_NUM_FAILED_TEST) - - (options, args) = parser.parse_args() - if args: - parser.error("unexpected arguments: " + repr(args)) - return options - -""" Load data from specified url """ -def load_url_data(url): - if onward30: - ourl = urllib.request.urlopen(url) - codec = ourl.info().get_param('charset') - content = ourl.read().decode(codec) - data = simplejson.loads(content, strict=False) - else: - ourl = urllib2.urlopen(url) - data = simplejson.load(ourl, strict=False) - return data - -""" List all builds of the target project. """ -def list_builds(jenkins_url, job_name): - global summary_mode - url = "%(jenkins)s/job/%(job_name)s/api/json?tree=builds[url,result,timestamp]" % dict( - jenkins=jenkins_url, - job_name=job_name) - - try: - data = load_url_data(url) - - except: - if not summary_mode: - logging.error("Could not fetch: %s" % url) - error_count += 1 - raise - return data['builds'] - -""" Find the names of any tests which failed in the given build output URL. """ -def find_failing_tests(testReportApiJson, jobConsoleOutput): - global summary_mode - global error_count - ret = set() - try: - data = load_url_data(testReportApiJson) - - except: - if not summary_mode: - logging.error(" Could not open testReport, check " + - jobConsoleOutput + " for why it was reported failed") - error_count += 1 - return ret - - for suite in data['suites']: - for cs in suite['cases']: - status = cs['status'] - errDetails = cs['errorDetails'] - if (status == 'REGRESSION' or status == 'FAILED' or (errDetails is not None)): - ret.add(cs['className'] + "." 
+ cs['name']) - - if len(ret) == 0 and (not summary_mode): - logging.info(" No failed tests in testReport, check " + - jobConsoleOutput + " for why it was reported failed.") - return ret - -""" Iterate runs of specfied job within num_prev_days and collect results """ -def find_flaky_tests(jenkins_url, job_name, num_prev_days): - global numRunsToExamine - global summary_mode - all_failing = dict() - # First list all builds - builds = list_builds(jenkins_url, job_name) - - # Select only those in the last N days - min_time = int(time.time()) - SECONDS_PER_DAY * num_prev_days - builds = [b for b in builds if (int(b['timestamp']) / 1000) > min_time] - - # Filter out only those that failed - failing_build_urls = [(b['url'] , b['timestamp']) for b in builds - if (b['result'] in ('UNSTABLE', 'FAILURE'))] - - tnum = len(builds) - num = len(failing_build_urls) - numRunsToExamine = tnum - if not summary_mode: - logging.info(" THERE ARE " + str(num) + " builds (out of " + str(tnum) - + ") that have failed tests in the past " + str(num_prev_days) + " days" - + ((".", ", as listed below:\n")[num > 0])) - - for failed_build_with_time in failing_build_urls: - failed_build = failed_build_with_time[0]; - jobConsoleOutput = failed_build + "Console"; - testReport = failed_build + "testReport"; - testReportApiJson = testReport + "/api/json"; - - ts = float(failed_build_with_time[1]) / 1000. - st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') - if not summary_mode: - logging.info("===>%s" % str(testReport) + " (" + st + ")") - failing = find_failing_tests(testReportApiJson, jobConsoleOutput) - if failing: - for ftest in failing: - if not summary_mode: - logging.info(" Failed test: %s" % ftest) - all_failing[ftest] = all_failing.get(ftest,0)+1 - - return all_failing - -def main(): - global numRunsToExamine - global summary_mode - logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) - - # set up logger to write to stdout - soh = logging.StreamHandler(sys.stdout) - soh.setLevel(logging.INFO) - logger = logging.getLogger() - logger.removeHandler(logger.handlers[0]) - logger.addHandler(soh) - - opts = parse_args() - logging.info("****Recently FAILED builds in url: " + opts.jenkins_url - + "/job/" + opts.job_name + "") - - if opts.num_failed_tests != -1: - summary_mode = True - - all_failing = find_flaky_tests(opts.jenkins_url, opts.job_name, - opts.num_prev_days) - if len(all_failing) == 0: - raise SystemExit(0) - - if summary_mode and opts.num_failed_tests < len(all_failing): - logging.info("\nAmong " + str(numRunsToExamine) + - " runs examined, top " + str(opts.num_failed_tests) + - " failed tests <#failedRuns: testName>:") - else: - logging.info("\nAmong " + str(numRunsToExamine) + - " runs examined, all failed tests <#failedRuns: testName>:") - - # print summary section: all failed tests sorted by how many times they failed - line_count = 0 - for tn in sorted(all_failing, key=all_failing.get, reverse=True): - logging.info(" " + str(all_failing[tn])+ ": " + tn) - if summary_mode: - line_count += 1 - if line_count == opts.num_failed_tests: - break - - if summary_mode and error_count > 0: - logging.info("\n" + str(error_count) + " errors found, you may " - + "re-run in non summary mode to see error details."); - -if __name__ == "__main__": - main() diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile index de416fa84093c..e4a55b2c6f6b5 100644 --- a/dev-support/docker/Dockerfile +++ b/dev-support/docker/Dockerfile @@ -18,7 +18,7 @@ # Dockerfile for 
installing the necessary dependencies for building Hadoop. # See BUILDING.txt. -FROM ubuntu:xenial +FROM ubuntu:bionic WORKDIR /root @@ -44,9 +44,11 @@ ENV DEBCONF_TERSE true RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ apt-utils \ + bats \ build-essential \ bzip2 \ clang \ + cmake \ curl \ doxygen \ fuse \ @@ -62,27 +64,43 @@ RUN apt-get -q update \ libsasl2-dev \ libsnappy-dev \ libssl-dev \ + libsnappy-dev \ libtool \ libzstd1-dev \ locales \ make \ pinentry-curses \ pkg-config \ - python \ - python2.7 \ - python-pip \ - python-pkg-resources \ - python-setuptools \ - python-wheel \ + python3 \ + python3-pip \ + python3-pkg-resources \ + python3-setuptools \ + python3-wheel \ rsync \ + shellcheck \ software-properties-common \ - snappy \ sudo \ valgrind \ zlib1g-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 + +####### +# Install SpotBugs 4.2.2 +####### +RUN mkdir -p /opt/spotbugs \ + && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz \ + && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ + && chmod +x /opt/spotbugs/bin/* +ENV SPOTBUGS_HOME /opt/spotbugs ####### # OpenJDK 8 @@ -93,20 +111,8 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* - -###### -# Install cmake 3.1.0 (3.5.1 ships with Xenial) -###### -RUN mkdir -p /opt/cmake \ - && curl -L -s -S \ - https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \ - -o /opt/cmake.tar.gz \ - && tar xzf /opt/cmake.tar.gz --strip-components 1 -C /opt/cmake -ENV CMAKE_HOME /opt/cmake -ENV PATH "${PATH}:/opt/cmake/bin" - ###### -# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial) +# Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### # hadolint ignore=DL3003 RUN mkdir -p /opt/protobuf-src \ @@ -123,7 +129,7 @@ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" ###### -# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial) +# Install Apache Maven 3.6.0 (3.6.0 ships with Bionic) ###### # hadolint ignore=DL3008 RUN apt-get -q update \ @@ -131,64 +137,29 @@ RUN apt-get -q update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64 -###### -# Install findbugs 3.0.1 (3.0.1 ships with Xenial) -# Ant is needed for findbugs -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends findbugs ant \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV FINDBUGS_HOME /usr - -#### -# Install shellcheck (0.4.6, the latest as of 2017-09-26) -#### -# hadolint ignore=DL3008 -RUN add-apt-repository -y ppa:hvr/ghc \ - && apt-get -q update \ - && apt-get -q install -y --no-install-recommends shellcheck ghc-8.0.2 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -#### -# Install bats (0.4.0, the latest as of 2017-09-26, ships with Xenial) -#### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends bats \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -#### -# Install pylint at fixed version (2.0.0 removed python2 support) -# https://github.com/PyCQA/pylint/issues/2294 -#### -RUN pip2 install pylint==1.9.2 - -#### -# Install dateutil.parser -#### -RUN pip2 
install python-dateutil==2.7.3 +# Install pylint and python-dateutil +RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 ### -# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +# Install node.js 10.x for web UI framework (4.2.6 ships with Xenial) ### -RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ - && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ +# hadolint ignore=DL3008 +RUN curl -L -s -S https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends nodejs \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && npm install -g bower@1.8.8 ### -## Install Yarn 1.12.1 for web UI framework +## Install Yarn 1.22.5 for web UI framework #### RUN curl -s -S https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ && echo 'deb https://dl.yarnpkg.com/debian/ stable main' > /etc/apt/sources.list.d/yarn.list \ && apt-get -q update \ - && apt-get install -y --no-install-recommends yarn=1.21.1-1 \ + && apt-get install -y --no-install-recommends yarn=1.22.5-1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* @@ -207,13 +178,16 @@ RUN curl -L -s -S \ ### ENV MAVEN_OPTS -Xms256m -Xmx1536m +# Skip gpg verification when downloading Yetus via yetus-wrapper +ENV HADOOP_SKIP_YETUS_VERIFICATION true + ### # Everything past this point is either not needed for testing or breaks Yetus. # So tell Yetus not to read the rest of the file: # YETUS CUT HERE ### -# Hugo static website generator (for new hadoop site and Ozone docs) +# Hugo static website generator for new hadoop site RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.58.3/hugo_0.58.3_Linux-64bit.deb \ && dpkg --install hugo.deb \ && rm hugo.deb diff --git a/dev-support/docker/Dockerfile_aarch64 b/dev-support/docker/Dockerfile_aarch64 index 8d3c3ad41ce02..62540fb9219dd 100644 --- a/dev-support/docker/Dockerfile_aarch64 +++ b/dev-support/docker/Dockerfile_aarch64 @@ -17,7 +17,7 @@ # Dockerfile for installing the necessary dependencies for building Hadoop. # See BUILDING.txt. -FROM ubuntu:xenial +FROM ubuntu:bionic WORKDIR /root @@ -35,17 +35,17 @@ ENV DEBCONF_TERSE true ###### # Install common dependencies from packages. Versions here are either # sufficient or irrelevant. -# -# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default -# Ubuntu Java. See Java section below! 
###### # hadolint ignore=DL3008 RUN apt-get -q update \ && apt-get -q install -y --no-install-recommends \ + ant \ apt-utils \ + bats \ build-essential \ bzip2 \ clang \ + cmake \ curl \ doxygen \ fuse \ @@ -53,6 +53,7 @@ RUN apt-get -q update \ gcc \ git \ gnupg-agent \ + libbcprov-java \ libbz2-dev \ libcurl4-openssl-dev \ libfuse-dev \ @@ -65,56 +66,44 @@ RUN apt-get -q update \ libzstd1-dev \ locales \ make \ + maven \ + openjdk-11-jdk \ + openjdk-8-jdk \ pinentry-curses \ pkg-config \ - python \ - python2.7 \ - python-pip \ - python-pkg-resources \ - python-setuptools \ - python-wheel \ + python3 \ + python3-pip \ + python3-pkg-resources \ + python3-setuptools \ + python3-wheel \ rsync \ + shellcheck \ software-properties-common \ - snappy \ sudo \ valgrind \ zlib1g-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +###### +# Set env vars required to build Hadoop +###### +ENV MAVEN_HOME /usr +# JAVA_HOME must be set in Maven >= 3.5.0 (MNG-6003) +ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-arm64 ####### -# OpenJDK 8 +# Install SpotBugs 4.2.2 ####### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - -###### -# Install cmake 3.1.0 (3.5.1 ships with Xenial) -# There is no cmake binary available for aarch64. Build from source. -###### -# hadolint ignore=DL3003 -RUN mkdir -p /opt/cmake/src \ - && curl -L -s -S \ - https://cmake.org/files/v3.1/cmake-3.1.0-1-src.tar.bz2 \ - -o /opt/cmake/cmake-src.tar.bz2 \ - && tar xvjf /opt/cmake/cmake-src.tar.bz2 -C /opt/cmake/src \ - && cd /opt/cmake/src \ - && tar xvjf cmake-3.1.0.tar.bz2 \ - && cd cmake-3.1.0 && patch -p0 -i ../cmake-3.1.0-1.patch && mkdir .build && cd .build \ - && ../bootstrap --parallel=2 \ - && make -j2 && ./bin/cpack \ - && tar xzf cmake-3.1.0-Linux-aarch64.tar.gz --strip-components 1 -C /opt/cmake \ - && cd /opt/cmake && rm -rf /opt/cmake/src -ENV CMAKE_HOME /opt/cmake -ENV PATH "${PATH}:/opt/cmake/bin" +RUN mkdir -p /opt/spotbugs \ + && curl -L -s -S https://github.com/spotbugs/spotbugs/releases/download/4.2.2/spotbugs-4.2.2.tgz \ + -o /opt/spotbugs.tgz \ + && tar xzf /opt/spotbugs.tgz --strip-components 1 -C /opt/spotbugs \ + && chmod +x /opt/spotbugs/bin/* +ENV SPOTBUGS_HOME /opt/spotbugs ###### -# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial) +# Install Google Protobuf 3.7.1 (3.0.0 ships with Bionic) ###### # hadolint ignore=DL3003 RUN mkdir -p /opt/protobuf-src \ @@ -130,73 +119,28 @@ RUN mkdir -p /opt/protobuf-src \ ENV PROTOBUF_HOME /opt/protobuf ENV PATH "${PATH}:/opt/protobuf/bin" -###### -# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial) -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends maven \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV MAVEN_HOME /usr - -###### -# Install findbugs 3.0.1 (3.0.1 ships with Xenial) -# Ant is needed for findbugs -###### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends findbugs ant \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* -ENV FINDBUGS_HOME /usr - -#### -# Install shellcheck (0.4.6, the latest as of 2017-09-26) -#### -# hadolint ignore=DL3008 -RUN add-apt-repository -y ppa:hvr/ghc \ - && apt-get -q update \ - && apt-get -q install -y --no-install-recommends shellcheck \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - #### -# Install bats (0.4.0, the latest as of 2017-09-26, ships with 
Xenial) +# Install pylint and python-dateutil #### -# hadolint ignore=DL3008 -RUN apt-get -q update \ - && apt-get -q install -y --no-install-recommends bats \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -#### -# Install pylint at fixed version (2.0.0 removed python2 support) -# https://github.com/PyCQA/pylint/issues/2294 -#### -RUN pip2 install pylint==1.9.2 - -#### -# Install dateutil.parser -#### -RUN pip2 install python-dateutil==2.7.3 +RUN pip3 install pylint==2.6.0 python-dateutil==2.8.1 ### -# Install node.js 8.17.0 for web UI framework (4.2.6 ships with Xenial) +# Install node.js 10.x for web UI framework (4.2.6 ships with Xenial) ### -RUN curl -L -s -S https://deb.nodesource.com/setup_8.x | bash - \ - && apt-get install -y --no-install-recommends nodejs=8.17.0-1nodesource1 \ +# hadolint ignore=DL3008 +RUN curl -L -s -S https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends nodejs \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && npm install -g bower@1.8.8 ### -## Install Yarn 1.12.1 for web UI framework +## Install Yarn 1.22.5 for web UI framework #### RUN curl -s -S https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ && echo 'deb https://dl.yarnpkg.com/debian/ stable main' > /etc/apt/sources.list.d/yarn.list \ && apt-get -q update \ - && apt-get install -y --no-install-recommends yarn=1.21.1-1 \ + && apt-get install -y --no-install-recommends yarn=1.22.5-1 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* @@ -216,6 +160,9 @@ RUN mkdir -p /opt/phantomjs \ ### ENV MAVEN_OPTS -Xms256m -Xmx1536m +# Skip gpg verification when downloading Yetus via yetus-wrapper +ENV HADOOP_SKIP_YETUS_VERIFICATION true + ### # Everything past this point is either not needed for testing or breaks Yetus. 
# So tell Yetus not to read the rest of the file: diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml index b0fd7325c6eb1..5caaacdfaf6a8 100644 --- a/hadoop-assemblies/pom.xml +++ b/hadoop-assemblies/pom.xml @@ -23,11 +23,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-assemblies - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Assemblies Apache Hadoop Assemblies diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml index 7c725d73e041e..871694209393f 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml @@ -56,6 +56,7 @@ **/build/** **/file:/** **/SecurityAuth.audit* + patchprocess/** diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml index ed4c0ef9ce9ff..1483ac801b7d9 100644 --- a/hadoop-build-tools/pom.xml +++ b/hadoop-build-tools/pom.xml @@ -18,7 +18,7 @@ hadoop-main org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-build-tools diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml index 8f3d3f13824ef..96e28496d7e98 100644 --- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml +++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml @@ -119,7 +119,12 @@ - + + + + + diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml index 7ee7b85fec937..f1eb9351f7c6b 100644 --- a/hadoop-client-modules/hadoop-client-api/pom.xml +++ b/hadoop-client-modules/hadoop-client-api/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-client-api - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop Client @@ -143,6 +143,8 @@ org/w3c/dom/**/* org/xml/sax/* org/xml/sax/**/* + org/bouncycastle/* + org/bouncycastle/**/* diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml index 757b374ec0ab0..c279024066ba4 100644 --- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-client-check-invariants - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT pom diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml index 08b4fb27befd9..bb2087db06d15 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-client-check-test-invariants - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT pom diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh index 08f9202972735..d77424e6b7899 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh +++ 
b/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh @@ -43,6 +43,8 @@ allowed_expr+="|^org/apache/hadoop/" allowed_expr+="|^META-INF/" # * whatever under the "webapps" directory; for things shipped by yarn allowed_expr+="|^webapps/" +# * Resources files used by Hadoop YARN mini cluster +allowed_expr+="|^TERMINAL/" # * Hadoop's default configuration files, which have the form # "_module_-default.xml" allowed_expr+="|^[^-]*-default.xml$" @@ -54,6 +56,8 @@ allowed_expr+="|^org.apache.hadoop.application-classloader.properties$" # * Used by JavaSandboxLinuxContainerRuntime as a default, loaded # from root, so can't relocate. :( allowed_expr+="|^java.policy$" +# * Used by javax.annotation +allowed_expr+="|^jndi.properties$" # * allowing native libraries from rocksdb. Leaving native libraries as it is. allowed_expr+="|^librocksdbjni-linux32.so" allowed_expr+="|^librocksdbjni-linux64.so" diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml index 1a14549250c3e..09cc1c20dbe89 100644 --- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml +++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-client-integration-tests - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Checks that we can use the generated artifacts Apache Hadoop Client Packaging Integration Tests @@ -75,6 +75,27 @@ hadoop-client-minicluster test + + org.bouncycastle + bcprov-jdk15on + test + + + org.bouncycastle + bcpkix-jdk15on + test + + + javax.xml.bind + jaxb-api + test + + + javax.activation + activation + 1.1.1 + test + @@ -159,6 +180,12 @@ test test-jar + + org.apache.hadoop + hadoop-yarn-server-tests + test + test-jar + diff --git a/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java b/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java index 6022fbc688dde..2e304861babbd 100644 --- a/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java +++ b/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.conf.Configuration; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsConstants; +import org.apache.hadoop.yarn.server.MiniYARNCluster; /** * Ensure that we can perform operations against the shaded minicluster @@ -54,6 +56,7 @@ public class ITUseMiniCluster { LoggerFactory.getLogger(ITUseMiniCluster.class); private MiniDFSCluster cluster; + private MiniYARNCluster yarnCluster; private static final String TEST_PATH = "/foo/bar/cats/dee"; private static final String FILENAME = "test.file"; @@ -73,6 +76,12 @@ public void clusterUp() throws IOException { .numDataNodes(3) .build(); cluster.waitActive(); + + conf.set("yarn.scheduler.capacity.root.queues", "default"); + conf.setInt("yarn.scheduler.capacity.root.default.capacity", 100); + yarnCluster = new MiniYARNCluster(getClass().getName(), 1, 1, 1, 1); + yarnCluster.init(conf); + yarnCluster.start(); } @After 
@@ -80,6 +89,7 @@ public void clusterDown() { if (cluster != null) { cluster.close(); } + IOUtils.cleanupWithLogger(LOG, yarnCluster); } @Test diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 52595d93523e9..9631081cd1af2 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-client-minicluster - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop Minicluster for Clients @@ -322,6 +322,10 @@ dnsjava dnsjava + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.bouncycastle:* @@ -719,13 +729,6 @@ testdata/* - - - org.apache.hadoop:hadoop-yarn-server-nodemanager:* - - TERMINAL/**/* - - @@ -772,6 +775,13 @@ about.html + + + org.eclipse.jetty.websocket:* + + about.html + + org.apache.hadoop:* @@ -811,15 +821,37 @@ */** - + org.eclipse.jetty:jetty-client */** + + org.eclipse.jetty:jetty-xml + + */** + + + + org.eclipse.jetty:jetty-http + + */** + + + + org.eclipse.jetty:jetty-util-ajax + + */** + + + + org.eclipse.jetty:jetty-server + + jetty-dir.css + + @@ -859,6 +891,8 @@ org/w3c/dom/**/* org/xml/sax/* org/xml/sax/**/* + org/bouncycastle/* + org/bouncycastle/**/* @@ -955,6 +989,13 @@ **/pom.xml + + javax/annotation/ + ${shaded.dependency.prefix}.javax.websocket. + + **/pom.xml + + jersey/ ${shaded.dependency.prefix}.jersey. diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index 496023521bf89..a7cdfe6759d55 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-client-runtime - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop Client @@ -158,12 +158,8 @@ com.google.code.findbugs:jsr305 io.dropwizard.metrics:metrics-core - org.eclipse.jetty.websocket:* org.eclipse.jetty:jetty-servlet org.eclipse.jetty:jetty-security - org.eclipse.jetty:jetty-client - org.eclipse.jetty:jetty-http - org.eclipse.jetty:jetty-xml org.ow2.asm:* org.bouncycastle:* @@ -213,6 +209,13 @@ about.html + + + org.eclipse.jetty.websocket:* + + about.html + + org.apache.kerby:kerb-util @@ -264,6 +267,8 @@ org/w3c/dom/**/* org/xml/sax/* org/xml/sax/**/* + org/bouncycastle/* + org/bouncycastle/**/* diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml index 9216a2e54a397..9788e59d0727e 100644 --- a/hadoop-client-modules/hadoop-client/pom.xml +++ b/hadoop-client-modules/hadoop-client/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-client - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Client aggregation pom with dependencies exposed Apache Hadoop Client Aggregator diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml index 0895e31ca307f..c18fc44e9a6e9 100644 --- a/hadoop-client-modules/pom.xml +++ b/hadoop-client-modules/pom.xml @@ -18,7 +18,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-client-modules diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml index b5e35b079f9fd..2b540cae31099 100644 --- 
a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-cloud-storage - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop Cloud Storage @@ -128,5 +128,10 @@ hadoop-openstack compile + + org.apache.hadoop + hadoop-cos + compile + diff --git a/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml b/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml index 40d78d0cd6cec..f8c3472640f25 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml @@ -15,4 +15,9 @@ limitations under the License. --> + + + + + diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml index 839bd04c9b643..c609a431d1932 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/pom.xml +++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml @@ -20,7 +20,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-cos @@ -64,10 +64,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml @@ -81,6 +80,22 @@ 3600 + + org.apache.maven.plugins + maven-dependency-plugin + + + deplist + compile + + list + + + ${project.basedir}/target/hadoop-cloud-storage-deps/${project.artifactId}.cloud-storage-optional.txt + + + + @@ -93,8 +108,8 @@ com.qcloud - cos_api - 5.4.9 + cos_api-bundle + 5.6.19 compile diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java index a4ee4d5be9ac8..409c9cb42f966 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java @@ -63,32 +63,27 @@ private BufferPool() { private File createDir(String dirPath) throws IOException { File dir = new File(dirPath); - if (null != dir) { - if (!dir.exists()) { - LOG.debug("Buffer dir: [{}] does not exists. create it first.", - dirPath); - if (dir.mkdirs()) { - if (!dir.setWritable(true) || !dir.setReadable(true) - || !dir.setExecutable(true)) { - LOG.warn("Set the buffer dir: [{}]'s permission [writable," - + "readable, executable] failed.", dir.getAbsolutePath()); - } - LOG.debug("Buffer dir: [{}] is created successfully.", - dir.getAbsolutePath()); - } else { - // Once again, check if it has been created successfully. - // Prevent problems created by multiple processes at the same time. - if (!dir.exists()) { - throw new IOException("buffer dir:" + dir.getAbsolutePath() - + " is created unsuccessfully"); - } + if (!dir.exists()) { + LOG.debug("Buffer dir: [{}] does not exists. create it first.", + dirPath); + if (dir.mkdirs()) { + if (!dir.setWritable(true) || !dir.setReadable(true) + || !dir.setExecutable(true)) { + LOG.warn("Set the buffer dir: [{}]'s permission [writable," + + "readable, executable] failed.", dir.getAbsolutePath()); } + LOG.debug("Buffer dir: [{}] is created successfully.", + dir.getAbsolutePath()); } else { - LOG.debug("buffer dir: {} already exists.", dirPath); + // Once again, check if it has been created successfully. 
+ // Prevent problems created by multiple processes at the same time. + if (!dir.exists()) { + throw new IOException("buffer dir:" + dir.getAbsolutePath() + + " is created unsuccessfully"); + } } } else { - throw new IOException("creating buffer dir: " + dir.getAbsolutePath() - + "unsuccessfully."); + LOG.debug("buffer dir: {} already exists.", dirPath); } return dir; diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java index a5dcdda07120b..249e9e1ade82a 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java @@ -80,7 +80,6 @@ public CosNFileReadTask( public void run() { int retries = 0; RetryPolicy.RetryAction retryAction; - LOG.info(Thread.currentThread().getName() + "read ..."); try { this.readBuffer.lock(); do { diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java index 333b34929ecda..4dda1260731d3 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java @@ -28,11 +28,11 @@ import java.util.HashMap; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -71,8 +71,8 @@ public class CosNFileSystem extends FileSystem { private String owner = "Unknown"; private String group = "Unknown"; - private ListeningExecutorService boundedIOThreadPool; - private ListeningExecutorService boundedCopyThreadPool; + private ExecutorService boundedIOThreadPool; + private ExecutorService boundedCopyThreadPool; public CosNFileSystem() { } diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java index c437dde613d2c..bd449622906f1 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java @@ -32,10 +32,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import com.qcloud.cos.model.PartETag; import org.apache.hadoop.conf.Configuration; diff 
--git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java index 39981caba24bb..cdac15ffc619e 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java @@ -22,15 +22,16 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.net.URI; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.qcloud.cos.auth.COSCredentialsProvider; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList; -import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider; -import org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider; +import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList; +import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialsProvider; +import org.apache.hadoop.fs.cosn.auth.SimpleCredentialsProvider; /** * Utility methods for CosN code. @@ -48,21 +49,23 @@ public final class CosNUtils { private CosNUtils() { } - public static COSCredentialProviderList createCosCredentialsProviderSet( + public static COSCredentialsProviderList createCosCredentialsProviderSet( + URI uri, Configuration conf) throws IOException { - COSCredentialProviderList credentialProviderList = - new COSCredentialProviderList(); + COSCredentialsProviderList credentialProviderList = + new COSCredentialsProviderList(); Class[] cosClasses = CosNUtils.loadCosProviderClasses( conf, CosNConfigKeys.COSN_CREDENTIALS_PROVIDER); if (0 == cosClasses.length) { - credentialProviderList.add(new SimpleCredentialProvider(conf)); - credentialProviderList.add(new EnvironmentVariableCredentialProvider()); + credentialProviderList.add( + new SimpleCredentialsProvider(uri, conf)); + credentialProviderList.add( + new EnvironmentVariableCredentialsProvider(uri, conf)); } else { for (Class credClass : cosClasses) { - credentialProviderList.add(createCOSCredentialProvider( - conf, + credentialProviderList.add(createCOSCredentialProvider(uri, conf, credClass)); } } @@ -83,16 +86,17 @@ public static Class[] loadCosProviderClasses( } public static COSCredentialsProvider createCOSCredentialProvider( + URI uri, Configuration conf, Class credClass) throws IOException { COSCredentialsProvider credentialsProvider; if (!COSCredentialsProvider.class.isAssignableFrom(credClass)) { - throw new IllegalArgumentException( - "class " + credClass + " " + NOT_COS_CREDENTIAL_PROVIDER); + throw new IllegalArgumentException("class " + credClass + " " + + NOT_COS_CREDENTIAL_PROVIDER); } if (Modifier.isAbstract(credClass.getModifiers())) { - throw new IllegalArgumentException( - "class " + credClass + " " + ABSTRACT_CREDENTIAL_PROVIDER); + throw new IllegalArgumentException("class " + credClass + " " + + ABSTRACT_CREDENTIAL_PROVIDER); } LOG.debug("Credential Provider class: " + credClass.getName()); @@ -112,8 +116,18 @@ public static COSCredentialsProvider createCOSCredentialProvider( return credentialsProvider; } - Method factory = getFactoryMethod( - credClass, COSCredentialsProvider.class, "getInstance"); + // new credClass(uri, conf) + constructor = getConstructor(credClass, URI.class, + Configuration.class); + if (null != constructor) { + credentialsProvider = + (COSCredentialsProvider) 
constructor.newInstance(uri, + conf); + return credentialsProvider; + } + + Method factory = getFactoryMethod(credClass, + COSCredentialsProvider.class, "getInstance"); if (null != factory) { credentialsProvider = (COSCredentialsProvider) factory.invoke(null); return credentialsProvider; diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java index 833f42d7be6e7..d2484c0e47b3c 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java @@ -34,6 +34,7 @@ import com.qcloud.cos.ClientConfig; import com.qcloud.cos.auth.BasicCOSCredentials; import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.endpoint.SuffixEndpointBuilder; import com.qcloud.cos.exception.CosClientException; import com.qcloud.cos.exception.CosServiceException; import com.qcloud.cos.http.HttpProtocol; @@ -64,7 +65,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList; +import org.apache.hadoop.fs.cosn.auth.COSCredentialsProviderList; import org.apache.hadoop.util.VersionInfo; import org.apache.http.HttpStatus; @@ -89,9 +90,9 @@ class CosNativeFileSystemStore implements NativeFileSystemStore { * @throws IOException Initialize the COS client failed, * caused by incorrect options. */ - private void initCOSClient(Configuration conf) throws IOException { - COSCredentialProviderList credentialProviderList = - CosNUtils.createCosCredentialsProviderSet(conf); + private void initCOSClient(URI uri, Configuration conf) throws IOException { + COSCredentialsProviderList credentialProviderList = + CosNUtils.createCosCredentialsProviderSet(uri, conf); String region = conf.get(CosNConfigKeys.COSN_REGION_KEY); String endpointSuffix = conf.get( CosNConfigKeys.COSN_ENDPOINT_SUFFIX_KEY); @@ -113,7 +114,7 @@ private void initCOSClient(Configuration conf) throws IOException { ClientConfig config; if (null == region) { config = new ClientConfig(new Region("")); - config.setEndPointSuffix(endpointSuffix); + config.setEndpointBuilder(new SuffixEndpointBuilder(endpointSuffix)); } else { config = new ClientConfig(new Region(region)); } @@ -146,7 +147,7 @@ private void initCOSClient(Configuration conf) throws IOException { @Override public void initialize(URI uri, Configuration conf) throws IOException { try { - initCOSClient(conf); + initCOSClient(uri, conf); this.bucketName = uri.getHost(); } catch (Exception e) { handleException(e, ""); @@ -174,8 +175,8 @@ private void storeFileWithRetry(String key, InputStream inputStream, PutObjectResult putObjectResult = (PutObjectResult) callCOSClientWithRetry(putObjectRequest); - LOG.debug("Store file successfully. COS key: [{}], ETag: [{}], " - + "MD5: [{}].", key, putObjectResult.getETag(), new String(md5Hash)); + LOG.debug("Store file successfully. COS key: [{}], ETag: [{}].", + key, putObjectResult.getETag()); } catch (Exception e) { String errMsg = String.format("Store file failed. 
COS key: [%s], " + "exception: [%s]", key, e.toString()); @@ -196,8 +197,7 @@ private void storeFileWithRetry(String key, InputStream inputStream, public void storeFile(String key, File file, byte[] md5Hash) throws IOException { LOG.info("Store file from local path: [{}]. file length: [{}] COS key: " + - "[{}] MD5: [{}].", file.getCanonicalPath(), file.length(), key, - new String(md5Hash)); + "[{}]", file.getCanonicalPath(), file.length(), key); storeFileWithRetry(key, new BufferedInputStream(new FileInputStream(file)), md5Hash, file.length()); } @@ -218,7 +218,7 @@ public void storeFile( byte[] md5Hash, long contentLength) throws IOException { LOG.info("Store file from input stream. COS key: [{}], " - + "length: [{}], MD5: [{}].", key, contentLength, md5Hash); + + "length: [{}].", key, contentLength); storeFileWithRetry(key, inputStream, md5Hash, contentLength); } @@ -250,7 +250,11 @@ public void storeEmptyFile(String key) throws IOException { public PartETag uploadPart(File file, String key, String uploadId, int partNum) throws IOException { InputStream inputStream = new FileInputStream(file); - return uploadPart(inputStream, key, uploadId, partNum, file.length()); + try { + return uploadPart(inputStream, key, uploadId, partNum, file.length()); + } finally { + inputStream.close(); + } } @Override diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/AbstractCOSCredentialsProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/AbstractCOSCredentialsProvider.java new file mode 100644 index 0000000000000..1363a7934cba0 --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/AbstractCOSCredentialsProvider.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn.auth; + +import com.qcloud.cos.auth.COSCredentialsProvider; +import org.apache.hadoop.conf.Configuration; + +import javax.annotation.Nullable; +import java.net.URI; + +/** + * The base class for COS credential providers which take a URI or + * configuration in their constructor. + */ +public abstract class AbstractCOSCredentialsProvider + implements COSCredentialsProvider { + private final URI uri; + private final Configuration conf; + + public AbstractCOSCredentialsProvider(@Nullable URI uri, + Configuration conf) { + this.uri = uri; + this.conf = conf; + } + + public URI getUri() { + return uri; + } + + public Configuration getConf() { + return conf; + } +} \ No newline at end of file diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java similarity index 78% rename from hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java rename to hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java index e900b997e4858..d2d2f8c9a7cab 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java @@ -24,11 +24,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import com.qcloud.cos.auth.AnonymousCOSCredentials; import com.qcloud.cos.auth.COSCredentials; import com.qcloud.cos.auth.COSCredentialsProvider; -import com.qcloud.cos.exception.CosClientException; import com.qcloud.cos.utils.StringUtils; import org.slf4j.Logger; @@ -37,10 +36,10 @@ /** * a list of cos credentials provider. 
*/ -public class COSCredentialProviderList implements +public class COSCredentialsProviderList implements COSCredentialsProvider, AutoCloseable { private static final Logger LOG = - LoggerFactory.getLogger(COSCredentialProviderList.class); + LoggerFactory.getLogger(COSCredentialsProviderList.class); private static final String NO_COS_CREDENTIAL_PROVIDERS = "No COS Credential Providers"; @@ -48,17 +47,17 @@ public class COSCredentialProviderList implements "Credentials requested after provider list was closed"; private final List providers = - new ArrayList<>(1); + new ArrayList(1); private boolean reuseLastProvider = true; private COSCredentialsProvider lastProvider; private final AtomicInteger refCount = new AtomicInteger(1); private final AtomicBoolean isClosed = new AtomicBoolean(false); - public COSCredentialProviderList() { + public COSCredentialsProviderList() { } - public COSCredentialProviderList( + public COSCredentialsProviderList( Collection providers) { this.providers.addAll(providers); } @@ -77,7 +76,7 @@ public void checkNotEmpty() { } } - public COSCredentialProviderList share() { + public COSCredentialsProviderList share() { Preconditions.checkState(!this.closed(), "Provider list is closed"); this.refCount.incrementAndGet(); return this; @@ -100,16 +99,13 @@ public COSCredentials getCredentials() { } for (COSCredentialsProvider provider : this.providers) { - try { - COSCredentials credentials = provider.getCredentials(); - if (!StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId()) - && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey()) - || credentials instanceof AnonymousCOSCredentials) { - this.lastProvider = provider; - return credentials; - } - } catch (CosClientException e) { - LOG.warn("No credentials provided by {}: {}", provider, e.toString()); + COSCredentials credentials = provider.getCredentials(); + if (null != credentials + && !StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId()) + && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey()) + || credentials instanceof AnonymousCOSCredentials) { + this.lastProvider = provider; + return credentials; } } @@ -117,6 +113,17 @@ public COSCredentials getCredentials() { "No COS Credentials provided by " + this.providers.toString()); } + @Override + public void refresh() { + if (this.closed()) { + return; + } + + for (COSCredentialsProvider cosCredentialsProvider : this.providers) { + cosCredentialsProvider.refresh(); + } + } + @Override public void close() throws Exception { if (this.closed()) { @@ -135,5 +142,4 @@ public void close() throws Exception { } } } - } diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java similarity index 70% rename from hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java rename to hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java index 0a7786b882f8b..baa76908b6147 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialsProvider.java @@ -20,16 +20,24 @@ import com.qcloud.cos.auth.BasicCOSCredentials; 
import com.qcloud.cos.auth.COSCredentials; import com.qcloud.cos.auth.COSCredentialsProvider; -import com.qcloud.cos.exception.CosClientException; import com.qcloud.cos.utils.StringUtils; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.cosn.Constants; +import javax.annotation.Nullable; +import java.net.URI; + /** - * the provider obtaining the cos credentials from the environment variables. + * The provider obtaining the cos credentials from the environment variables. */ -public class EnvironmentVariableCredentialProvider - implements COSCredentialsProvider { +public class EnvironmentVariableCredentialsProvider + extends AbstractCOSCredentialsProvider implements COSCredentialsProvider { + + public EnvironmentVariableCredentialsProvider(@Nullable URI uri, + Configuration conf) { + super(uri, conf); + } + @Override public COSCredentials getCredentials() { String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV); @@ -41,15 +49,19 @@ public COSCredentials getCredentials() { if (!StringUtils.isNullOrEmpty(secretId) && !StringUtils.isNullOrEmpty(secretKey)) { return new BasicCOSCredentials(secretId, secretKey); - } else { - throw new CosClientException( - "Unable to load COS credentials from environment variables" + - "(COS_SECRET_ID or COS_SECRET_KEY)"); } + + return null; + } + + @Override + public void refresh() { } @Override public String toString() { - return "EnvironmentVariableCredentialProvider{}"; + return String.format("EnvironmentVariableCredentialsProvider{%s, %s}", + Constants.COSN_SECRET_ID_ENV, + Constants.COSN_SECRET_KEY_ENV); } } diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java similarity index 66% rename from hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java rename to hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java index f0635fc0d00cf..107574a87c3aa 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialsProvider.java @@ -20,35 +20,41 @@ import com.qcloud.cos.auth.BasicCOSCredentials; import com.qcloud.cos.auth.COSCredentials; import com.qcloud.cos.auth.COSCredentialsProvider; -import com.qcloud.cos.exception.CosClientException; - -import org.apache.commons.lang3.StringUtils; +import com.qcloud.cos.utils.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.cosn.CosNConfigKeys; +import javax.annotation.Nullable; +import java.net.URI; + /** * Get the credentials from the hadoop configuration. 
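The CosN credential refactor shown above (the new AbstractCOSCredentialsProvider base class, the (URI, Configuration) constructor lookup added to CosNUtils, and providers that now return null instead of throwing when no credentials are available) defines the contract a custom provider has to follow. Below is a hedged sketch of such a provider; the class name and the system-property keys are invented for illustration and are not part of this patch.

```java
import java.net.URI;

import javax.annotation.Nullable;

import com.qcloud.cos.auth.BasicCOSCredentials;
import com.qcloud.cos.auth.COSCredentials;
import com.qcloud.cos.auth.COSCredentialsProvider;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.cosn.auth.AbstractCOSCredentialsProvider;

/**
 * Hypothetical provider that reads the secret id/key from JVM system
 * properties. The (URI, Configuration) constructor is what CosNUtils now
 * looks for via reflection when this class is listed in
 * fs.cosn.credentials.provider.
 */
public class SystemPropertyCredentialsProvider
    extends AbstractCOSCredentialsProvider implements COSCredentialsProvider {

  public SystemPropertyCredentialsProvider(@Nullable URI uri,
      Configuration conf) {
    super(uri, conf);
  }

  @Override
  public COSCredentials getCredentials() {
    String secretId = System.getProperty("example.cosn.secret.id");
    String secretKey = System.getProperty("example.cosn.secret.key");
    if (secretId != null && !secretId.isEmpty()
        && secretKey != null && !secretKey.isEmpty()) {
      return new BasicCOSCredentials(secretId, secretKey);
    }
    // Returning null lets COSCredentialsProviderList fall through to the
    // next configured provider instead of failing immediately.
    return null;
  }

  @Override
  public void refresh() {
    // Nothing to cache; credentials are re-read on each getCredentials().
  }
}
```

A provider like this would be enabled by listing its fully qualified class name in fs.cosn.credentials.provider; COSCredentialsProviderList, as patched above, skips any provider whose getCredentials() returns null or empty keys and tries the next one in the chain.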
*/ -public class SimpleCredentialProvider implements COSCredentialsProvider { +public class SimpleCredentialsProvider + extends AbstractCOSCredentialsProvider implements COSCredentialsProvider { private String secretId; private String secretKey; - public SimpleCredentialProvider(Configuration conf) { - this.secretId = conf.get( - CosNConfigKeys.COSN_SECRET_ID_KEY - ); - this.secretKey = conf.get( - CosNConfigKeys.COSN_SECRET_KEY_KEY - ); + public SimpleCredentialsProvider(@Nullable URI uri, Configuration conf) { + super(uri, conf); + if (null != conf) { + this.secretId = conf.get( + CosNConfigKeys.COSN_SECRET_ID_KEY); + this.secretKey = conf.get( + CosNConfigKeys.COSN_SECRET_KEY_KEY); + } } @Override public COSCredentials getCredentials() { - if (!StringUtils.isEmpty(this.secretId) - && !StringUtils.isEmpty(this.secretKey)) { + if (!StringUtils.isNullOrEmpty(this.secretId) + && !StringUtils.isNullOrEmpty(this.secretKey)) { return new BasicCOSCredentials(this.secretId, this.secretKey); } - throw new CosClientException("secret id or secret key is unset"); + return null; } + @Override + public void refresh() { + } } diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md b/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md index d4f8728924348..9c96ac3659815 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md +++ b/hadoop-cloud-storage-project/hadoop-cos/src/site/markdown/cloud-storage/index.md @@ -130,20 +130,19 @@ Each user needs to properly configure the credentials ( User's secreteId and sec ```xml fs.cosn.credentials.provider - org.apache.hadoop.fs.auth.SimpleCredentialProvider + org.apache.hadoop.fs.auth.SimpleCredentialsProvider This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: - 1.org.apache.hadoop.fs.auth.SimpleCredentialProvider: Obtain the secret id and secret key - from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml - 2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY + 1.org.apache.hadoop.fs.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml + 2.org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named COS_SECRET_ID and COS_SECRET_KEY If unspecified, the default order of credential providers is: - 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider - 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider + 1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider + 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider @@ -237,7 +236,7 @@ Hadoop-COS provides rich runtime properties to set, and most of these do not req | properties | description | default value | required | |:----------:|:-----------|:-------------:|:--------:| | fs.defaultFS | Configure the default file system used by Hadoop.| None | NO | -| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider:
1. org.apache.hadoop.fs.cos.auth.SimpleCredentialProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`.

If unspecified, the default order of credential providers is:
1. org.apache.hadoop.fs.auth.SimpleCredentialProvider;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider. | None | NO | +| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider:
1. org.apache.hadoop.fs.cos.auth.SimpleCredentialsProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider: Obtain the secret id and secret key from system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`.

If unspecified, the default order of credential providers is:
1. org.apache.hadoop.fs.auth.SimpleCredentialsProvider;
2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialsProvider. | None | NO | | fs.cosn.userinfo.secretId/secretKey | The API key information of your account | None | YES | | fs.cosn.bucket.region | The region where the bucket is located. | None | YES | | fs.cosn.impl | The implementation class of the CosN filesystem. | None | YES | diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java new file mode 100644 index 0000000000000..8b74f3639ddbd --- /dev/null +++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosCredentials.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.cosn; + +import com.qcloud.cos.auth.COSCredentials; +import com.qcloud.cos.auth.COSCredentialsProvider; +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +public class TestCosCredentials { + private static final Logger LOG = + LoggerFactory.getLogger(TestCosCredentials.class); + + private final URI fsUri; + + private final String testCosNSecretId = "secretId"; + private final String testCosNSecretKey = "secretKey"; + private final String testCosNEnvSecretId = "env_secretId"; + private final String testCosNEnvSecretKey = "env_secretKey"; + + public TestCosCredentials() throws URISyntaxException { + // A fake uri for tests. + this.fsUri = new URI("cosn://test-bucket-1250000000"); + } + + @Test + public void testSimpleCredentialsProvider() throws Throwable { + Configuration configuration = new Configuration(); + configuration.set(CosNConfigKeys.COSN_SECRET_ID_KEY, + testCosNSecretId); + configuration.set(CosNConfigKeys.COSN_SECRET_KEY_KEY, + testCosNSecretKey); + validateCredentials(this.fsUri, configuration); + } + + @Test + public void testEnvironmentCredentialsProvider() throws Throwable { + Configuration configuration = new Configuration(); + // Set EnvironmentVariableCredentialsProvider as the CosCredentials + // Provider. + configuration.set(CosNConfigKeys.COSN_CREDENTIALS_PROVIDER, + "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider"); + // Set the environment variables storing the secret id and secret key. + System.setProperty(Constants.COSN_SECRET_ID_ENV, testCosNEnvSecretId); + System.setProperty(Constants.COSN_SECRET_KEY_ENV, testCosNEnvSecretKey); + validateCredentials(this.fsUri, configuration); + } + + private void validateCredentials(URI uri, Configuration configuration) + throws IOException { + if (null != configuration) { + COSCredentialsProvider credentialsProvider = + CosNUtils.createCosCredentialsProviderSet(uri, configuration); + COSCredentials cosCredentials = credentialsProvider.getCredentials(); + assertNotNull("The cos credentials obtained is null.", cosCredentials); + if (configuration.get( + CosNConfigKeys.COSN_CREDENTIALS_PROVIDER).compareToIgnoreCase( + "org.apache.hadoop.fs.cosn.EnvironmentVariableCredentialsProvider") + == 0) { + if (null == cosCredentials.getCOSAccessKeyId() + || cosCredentials.getCOSAccessKeyId().isEmpty() + || null == cosCredentials.getCOSSecretKey() + || cosCredentials.getCOSSecretKey().isEmpty()) { + String failMessage = String.format( + "Test EnvironmentVariableCredentialsProvider failed. The " + + "expected is [secretId: %s, secretKey: %s], but got null or" + + " empty.", testCosNEnvSecretId, testCosNEnvSecretKey); + fail(failMessage); + } + + if (cosCredentials.getCOSAccessKeyId() + .compareTo(testCosNEnvSecretId) != 0 + || cosCredentials.getCOSSecretKey() + .compareTo(testCosNEnvSecretKey) != 0) { + String failMessage = String.format("Test " + + "EnvironmentVariableCredentialsProvider failed. 
" + + "The expected is [secretId: %s, secretKey: %s], but got is " + + "[secretId:%s, secretKey:%s].", testCosNEnvSecretId, + testCosNEnvSecretKey, cosCredentials.getCOSAccessKeyId(), + cosCredentials.getCOSSecretKey()); + } + // expected + } else { + if (null == cosCredentials.getCOSAccessKeyId() + || cosCredentials.getCOSAccessKeyId().isEmpty() + || null == cosCredentials.getCOSSecretKey() + || cosCredentials.getCOSSecretKey().isEmpty()) { + String failMessage = String.format( + "Test COSCredentials failed. The " + + "expected is [secretId: %s, secretKey: %s], but got null or" + + " empty.", testCosNSecretId, testCosNSecretKey); + fail(failMessage); + } + if (cosCredentials.getCOSAccessKeyId() + .compareTo(testCosNSecretId) != 0 + || cosCredentials.getCOSSecretKey() + .compareTo(testCosNSecretKey) != 0) { + String failMessage = String.format("Test " + + "EnvironmentVariableCredentialsProvider failed. " + + "The expected is [secretId: %s, secretKey: %s], but got is " + + "[secretId:%s, secretKey:%s].", testCosNSecretId, + testCosNSecretKey, cosCredentials.getCOSAccessKeyId(), + cosCredentials.getCOSSecretKey()); + fail(failMessage); + } + // expected + } + } + } +} diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml index f39e8c3aaf9f8..4d6e7d0f12751 100644 --- a/hadoop-cloud-storage-project/pom.xml +++ b/hadoop-cloud-storage-project/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-cloud-storage-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Cloud Storage Project Apache Hadoop Cloud Storage Project pom diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml index 738f0ada8f1e9..96382cc47100b 100644 --- a/hadoop-common-project/hadoop-annotations/pom.xml +++ b/hadoop-common-project/hadoop-annotations/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-annotations - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Annotations Apache Hadoop Annotations jar diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index fb904912999b8..f734a0fd7b61c 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-auth-examples - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT war Apache Hadoop Auth Examples diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 20a3e7059b154..e1309caeac7d3 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-auth - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop Auth @@ -183,10 +183,19 @@ ${apacheds.version} test + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + compile + com.google.guava guava - compile + test + + + org.eclipse.jetty + jetty-server @@ -228,8 +237,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml @@ -252,7 +261,7 @@ package - javadoc + javadoc-no-fork diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java index 488400647cf06..32f4edfbc5710 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java @@ -153,7 +153,6 @@ private synchronized void setAuthCookie(HttpCookie cookie) { cookieHeaders = new HashMap<>(); cookieHeaders.put("Cookie", Arrays.asList(cookie.toString())); } - LOG.trace("Setting token value to {} ({})", authCookie, oldCookie); } private void setAuthCookieValue(String value) { diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java index 64d43307ffc2d..3bfa349880c5a 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.client; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.lang.reflect.Constructor; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.security.authentication.server.HttpConstants; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index 94d11f48cf2a9..9f40c42d24135 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -19,6 +19,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.authentication.util.*; +import org.eclipse.jetty.server.Response; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -619,11 +620,20 @@ && getMaxInactiveInterval() > 0) { KerberosAuthenticator.WWW_AUTHENTICATE))) { errCode = HttpServletResponse.SC_FORBIDDEN; } + // After Jetty 9.4.21, sendError() no longer allows a custom message. + // use setStatusWithReason() to set a custom message. 
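The comment just above explains why AuthenticationFilter can no longer rely on sendError() for the message text on newer Jetty. The following is a consolidated sketch of what the patched error path amounts to once the diff markers are stripped; the helper method name is invented and the surrounding filter code is simplified.

```java
import java.io.IOException;

import javax.servlet.http.HttpServletResponse;

import org.eclipse.jetty.server.Response;

// Consolidated view of the patched error path (names simplified).
final class AuthErrorSketch {
  private AuthErrorSketch() {
  }

  static void sendAuthError(HttpServletResponse httpResponse, int errCode,
      Exception authenticationEx) throws IOException {
    // Jetty 9.4.21+ ignores the message passed to sendError(), so the
    // reason is also set explicitly on the Jetty Response when available.
    String reason = (authenticationEx == null)
        ? "Authentication required"
        : authenticationEx.getMessage();
    if (httpResponse instanceof Response) {
      ((Response) httpResponse).setStatusWithReason(errCode, reason);
    }
    httpResponse.sendError(errCode, reason);
  }
}
```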
+ String reason; if (authenticationEx == null) { - httpResponse.sendError(errCode, "Authentication required"); + reason = "Authentication required"; } else { - httpResponse.sendError(errCode, authenticationEx.getMessage()); + reason = authenticationEx.getMessage(); } + + if (httpResponse instanceof Response) { + ((Response)httpResponse).setStatusWithReason(errCode, reason); + } + + httpResponse.sendError(errCode, reason); } } } diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java index d881c65893058..79739a487b431 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java @@ -20,7 +20,7 @@ import java.util.Locale; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * This is a utility class designed to provide functionality related to diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java index 884398cb799ee..5e4b0e844275a 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java @@ -28,7 +28,7 @@ import java.security.interfaces.RSAPublicKey; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.util.CertificateUtil; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java index 50eeb2a965e27..703842f3e3915 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.server; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.commons.codec.binary.Base64; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java index 
8cc8d03447a99..94ed5d44d2a68 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java @@ -38,8 +38,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The {@link LdapAuthenticationHandler} implements the BASIC authentication diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java index 58a0adb237e70..b2499ff734bbe 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java @@ -30,8 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; -import com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; /** * The {@link MultiSchemeAuthenticationHandler} supports configuring multiple diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java index e8aa160a20877..c03703732cf08 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.util; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java index 67c2c10237d49..a308cef190396 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java @@ -26,7 +26,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import 
org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java index 4319aa5b0df98..95f0106a24424 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java @@ -22,7 +22,6 @@ import java.io.File; import java.io.IOException; import java.io.UnsupportedEncodingException; -import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.net.InetAddress; import java.net.UnknownHostException; @@ -73,21 +72,29 @@ private static Oid getNumericOidInstance(String oidName) { } } - public static Oid getOidInstance(String oidName) - throws ClassNotFoundException, GSSException, NoSuchFieldException, - IllegalAccessException { - Class oidClass; - if (IBM_JAVA) { - if ("NT_GSS_KRB5_PRINCIPAL".equals(oidName)) { - // IBM JDK GSSUtil class does not have field for krb5 principal oid - return new Oid("1.2.840.113554.1.2.2.1"); - } - oidClass = Class.forName("com.ibm.security.jgss.GSSUtil"); - } else { - oidClass = Class.forName("sun.security.jgss.GSSUtil"); + /** + * Returns the Oid instance from string oidName. + * Use {@link GSS_SPNEGO_MECH_OID}, {@link GSS_KRB5_MECH_OID}, + * or {@link NT_GSS_KRB5_PRINCIPAL_OID} instead. + * + * @return Oid instance + * @param oidName The oid Name + * @throws NoSuchFieldException if the input is not supported. + */ + @Deprecated + public static Oid getOidInstance(String oidName) + throws NoSuchFieldException { + switch (oidName) { + case "GSS_SPNEGO_MECH_OID": + return GSS_SPNEGO_MECH_OID; + case "GSS_KRB5_MECH_OID": + return GSS_KRB5_MECH_OID; + case "NT_GSS_KRB5_PRINCIPAL": + return NT_GSS_KRB5_PRINCIPAL_OID; + default: + throw new NoSuchFieldException( + "oidName: " + oidName + " is not supported."); } - Field oidField = oidClass.getDeclaredField(oidName); - return (Oid)oidField.get(oidClass); } /** diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java index 9245887832102..a57b744c2be0d 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.security.SecureRandom; import java.util.Random; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java index e516b5b76a05f..69a09c189be27 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java +++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java @@ -18,7 +18,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import javax.servlet.ServletContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java index f0c350ed9594b..a1cd6de8e5933 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.nio.ByteBuffer; import java.security.SecureRandom; import java.util.Collections; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java index 629b68bffbbd9..f10371b925758 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java @@ -301,11 +301,10 @@ public String call() throws Exception { GSSContext gssContext = null; try { String servicePrincipal = KerberosTestUtils.getServerPrincipal(); - Oid oid = - KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL"); + Oid oid = KerberosUtil.NT_GSS_KRB5_PRINCIPAL_OID; GSSName serviceName = gssManager.createName(servicePrincipal, oid); - oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID"); + oid = KerberosUtil.GSS_KRB5_MECH_OID; gssContext = gssManager.createContext(serviceName, oid, null, GSSContext.DEFAULT_LIFETIME); gssContext.requestCredDeleg(true); diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java index 7e5b10e641889..a7747398eec46 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java @@ -17,7 +17,7 @@ import java.util.Properties; import javax.servlet.ServletContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; 
import org.apache.hadoop.security.authentication.server.AuthenticationFilter; diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java index e567e7bfbafb3..cb59c2099fc2c 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceStability; /** diff --git a/hadoop-common-project/hadoop-common/HadoopCommon.cmake b/hadoop-common-project/hadoop-common/HadoopCommon.cmake index 4de70ac3f78e2..7628ecf628de6 100644 --- a/hadoop-common-project/hadoop-common/HadoopCommon.cmake +++ b/hadoop-common-project/hadoop-common/HadoopCommon.cmake @@ -193,7 +193,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux") elseif(CMAKE_SYSTEM_NAME STREQUAL "SunOS") # Solaris flags. 64-bit compilation is mandatory, and is checked earlier. hadoop_add_compiler_flags("-m64 -D_POSIX_C_SOURCE=200112L -D__EXTENSIONS__ -D_POSIX_PTHREAD_SEMANTICS") - set(CMAKE_CXX_FLAGS "-std=gnu++98 ${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_STANDARD 98) hadoop_add_linker_flags("-m64") # CMAKE_SYSTEM_PROCESSOR is set to the output of 'uname -p', which on Solaris is @@ -212,4 +212,4 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL "SunOS") endif() # Set GNU99 as the C standard to use -set(CMAKE_C_FLAGS "-std=gnu99 ${CMAKE_C_FLAGS}") \ No newline at end of file +set(CMAKE_C_STANDARD 99) \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml index cf5c3874d1063..23e39d055ffc5 100644 --- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml @@ -283,6 +283,10 @@ + + + + diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch index 8f87d4092bcd3..2bd7b63f0178f 100644 --- a/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch +++ b/hadoop-common-project/hadoop-common/dev-support/jdiff-workaround.patch @@ -1,11 +1,12 @@ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java -index a277abd..ed7c709 100644 +index a277abd6e13..1d131d5db6e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java -@@ -43,18 +43,6 @@ +@@ -42,18 +42,6 @@ + @InterfaceAudience.Private public abstract MetricsSystem init(String prefix); - /** +- /** - * Register a metrics source - * @param the actual type of the source object - * @param source object to register @@ -17,15 +18,11 @@ index a277abd..ed7c709 100644 - */ - public abstract T register(String 
name, String desc, T source); - -- /** + /** * Unregister a metrics source * @param name of the source. This is the name you use to call register() - */ -@@ -77,18 +65,19 @@ - */ - @InterfaceAudience.Private +@@ -79,16 +67,16 @@ public abstract MetricsSource getSource(String name); -+ /** - * Register a metrics sink @@ -50,10 +47,10 @@ index a277abd..ed7c709 100644 /** * Register a callback interface for JMX events diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java -index 6986edb..eeea81f 100644 +index a6edf08e5a7..5b87be1ec67 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java -@@ -270,27 +270,6 @@ void registerSource(String name, String desc, MetricsSource source) { +@@ -269,31 +269,6 @@ void registerSource(String name, String desc, MetricsSource source) { LOG.debug("Registered source "+ name); } @@ -61,7 +58,11 @@ index 6986edb..eeea81f 100644 - T register(final String name, final String description, final T sink) { - LOG.debug(name +", "+ description); - if (allSinks.containsKey(name)) { -- LOG.warn("Sink "+ name +" already exists!"); +- if(sinks.get(name) == null) { +- registerSink(name, description, sink); +- } else { +- LOG.warn("Sink "+ name +" already exists!"); +- } - return sink; - } - allSinks.put(name, sink); @@ -82,10 +83,10 @@ index 6986edb..eeea81f 100644 checkNotNull(config, "config"); MetricsConfig conf = sinkConfigs.get(name); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java -index c19d238..f8412f1 100644 +index 7bc772f062a..f012c877d18 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java -@@ -130,8 +130,8 @@ public void testTagsForPrefix() throws Exception { +@@ -139,8 +139,8 @@ public void testTagsForPrefix() throws Exception { GangliaMetricsTestHelper.setDatagramSocket(gsink31, mockds31); // register the sinks diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_3.2.2.xml b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_3.2.2.xml new file mode 100644 index 0000000000000..40bea21f378fe --- /dev/null +++ b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_3.2.2.xml @@ -0,0 +1,35381 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + UnsupportedOperationException + + If a key is deprecated in favor of multiple keys, they are all treated as + aliases of each other, and setting any one of them resets all the others + to the new value. + + If you have multiple deprecation entries to add, it is more efficient to + use #addDeprecations(DeprecationDelta[] deltas) instead. 
+ + @param key + @param newKeys + @param customMessage + @deprecated use {@link #addDeprecation(String key, String newKey, + String customMessage)} instead]]> + + + + + + + + UnsupportedOperationException + + If you have multiple deprecation entries to add, it is more efficient to + use #addDeprecations(DeprecationDelta[] deltas) instead. + + @param key + @param newKey + @param customMessage]]> + + + + + + + UnsupportedOperationException + + If a key is deprecated in favor of multiple keys, they are all treated as + aliases of each other, and setting any one of them resets all the others + to the new value. + + If you have multiple deprecation entries to add, it is more efficient to + use #addDeprecations(DeprecationDelta[] deltas) instead. + + @param key Key that is to be deprecated + @param newKeys list of keys that take up the values of deprecated key + @deprecated use {@link #addDeprecation(String key, String newKey)} instead]]> + + + + + + + UnsupportedOperationException + + If you have multiple deprecation entries to add, it is more efficient to + use #addDeprecations(DeprecationDelta[] deltas) instead. + + @param key Key that is to be deprecated + @param newKey key that takes up the value of deprecated key]]> + + + + + + key is deprecated. + + @param key the parameter which is to be checked for deprecation + @return true if the key is deprecated and + false otherwise.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + final. + + @param name resource to be added, the classpath is examined for a file + with that name.]]> + + + + + + + + + + final. + + @param url url of the resource to be added, the local filesystem is + examined directly to find the resource, without referring to + the classpath.]]> + + + + + + + + + + final. + + @param file file-path of resource to be added, the local filesystem is + examined directly to find the resource, without referring to + the classpath.]]> + + + + + + + + + + final. + + WARNING: The contents of the InputStream will be cached, by this method. + So use this sparingly because it does increase the memory consumption. + + @param in InputStream to deserialize the object from. In will be read from + when a get or set is called next. After it is read the stream will be + closed.]]> + + + + + + + + + + + final. + + @param in InputStream to deserialize the object from. + @param name the name of the resource because InputStream.toString is not + very descriptive some times.]]> + + + + + + + + + + + final. + + @param conf Configuration object from which to load properties]]> + + + + + + + + + + + name property, null if + no such property exists. If the key is deprecated, it returns the value of + the first key which replaces the deprecated key and is not null. + + Values are processed for variable expansion + before being returned. + + @param name the property name, will be trimmed before get value. + @return the value of the name or its replacing property, + or null if no such property exists.]]> + + + + + + + + + + + + + + + name property, but only for + names which have no valid value, usually non-existent or commented + out in XML. + + @param name the property name + @return true if the property name exists without value]]> + + + + + + name property as a trimmed String, + null if no such property exists. + If the key is deprecated, it returns the value of + the first key which replaces the deprecated key and is not null + + Values are processed for variable expansion + before being returned. + + @param name the property name. 
+ @return the value of the name or its replacing property, + or null if no such property exists.]]> + + + + + + + name property as a trimmed String, + defaultValue if no such property exists. + See @{Configuration#getTrimmed} for more details. + + @param name the property name. + @param defaultValue the property default value. + @return the value of the name or defaultValue + if it is not set.]]> + + + + + + name property, without doing + variable expansion.If the key is + deprecated, it returns the value of the first key which replaces + the deprecated key and is not null. + + @param name the property name. + @return the value of the name property or + its replacing property and null if no such property exists.]]> + + + + + + + value of the name property. If + name is deprecated or there is a deprecated name associated to it, + it sets the value to both names. Name will be trimmed before put into + configuration. + + @param name property name. + @param value property value.]]> + + + + + + + + value of the name property. If + name is deprecated, it also sets the value to + the keys that replace the deprecated key. Name will be trimmed before put + into configuration. + + @param name property name. + @param value property value. + @param source the place that this configuration value came from + (For debugging). + @throws IllegalArgumentException when the value or name is null.]]> + + + + + + + + + + + + + + + + + + + + name. If the key is deprecated, + it returns the value of the first key which replaces the deprecated key + and is not null. + If no such property exists, + then defaultValue is returned. + + @param name property name, will be trimmed before get value. + @param defaultValue default value. + @return property value, or defaultValue if the property + doesn't exist.]]> + + + + + + + name property as an int. + + If no such property exists, the provided default value is returned, + or if the specified value is not a valid int, + then an error is thrown. + + @param name property name. + @param defaultValue default value. + @throws NumberFormatException when the value is invalid + @return property value as an int, + or defaultValue.]]> + + + + + + name property as a set of comma-delimited + int values. + + If no such property exists, an empty array is returned. + + @param name property name + @return property value interpreted as an array of comma-delimited + int values]]> + + + + + + + name property to an int. + + @param name property name. + @param value int value of the property.]]> + + + + + + + name property as a long. + If no such property exists, the provided default value is returned, + or if the specified value is not a valid long, + then an error is thrown. + + @param name property name. + @param defaultValue default value. + @throws NumberFormatException when the value is invalid + @return property value as a long, + or defaultValue.]]> + + + + + + + name property as a long or + human readable format. If no such property exists, the provided default + value is returned, or if the specified value is not a valid + long or human readable format, then an error is thrown. You + can use the following suffix (case insensitive): k(kilo), m(mega), g(giga), + t(tera), p(peta), e(exa) + + @param name property name. + @param defaultValue default value. + @throws NumberFormatException when the value is invalid + @return property value as a long, + or defaultValue.]]> + + + + + + + name property to a long. + + @param name property name. 
+ @param value long value of the property.]]> + + + + + + + name property as a float. + If no such property exists, the provided default value is returned, + or if the specified value is not a valid float, + then an error is thrown. + + @param name property name. + @param defaultValue default value. + @throws NumberFormatException when the value is invalid + @return property value as a float, + or defaultValue.]]> + + + + + + + name property to a float. + + @param name property name. + @param value property value.]]> + + + + + + + name property as a double. + If no such property exists, the provided default value is returned, + or if the specified value is not a valid double, + then an error is thrown. + + @param name property name. + @param defaultValue default value. + @throws NumberFormatException when the value is invalid + @return property value as a double, + or defaultValue.]]> + + + + + + + name property to a double. + + @param name property name. + @param value property value.]]> + + + + + + + name property as a boolean. + If no such property is specified, or if the specified value is not a valid + boolean, then defaultValue is returned. + + @param name property name. + @param defaultValue default value. + @return property value as a boolean, + or defaultValue.]]> + + + + + + + name property to a boolean. + + @param name property name. + @param value boolean value of the property.]]> + + + + + + + + + + + + + + name property to the given type. This + is equivalent to set(<name>, value.toString()). + @param name property name + @param value new value]]> + + + + + + + + + + + + + + + name to the given time duration. This + is equivalent to set(<name>, value + <time suffix>). + @param name Property name + @param value Time duration + @param unit Unit of time]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + name property as a Pattern. + If no such property is specified, or if the specified value is not a valid + Pattern, then DefaultValue is returned. + Note that the returned value is NOT trimmed by this method. + + @param name property name + @param defaultValue default value + @return property value as a compiled Pattern, or defaultValue]]> + + + + + + + Pattern. + If the pattern is passed as null, sets the empty pattern which results in + further calls to getPattern(...) returning the default value. + + @param name property name + @param pattern new value]]> + + + + + + + + + + + + + + + + + + + name property as + a collection of Strings. + If no such property is specified then empty collection is returned. +

+ This is an optimized version of {@link #getStrings(String)} + + @param name property name. + @return property value as a collection of Strings.]]> + + + + + + name property as + an array of Strings. + If no such property is specified then null is returned. + + @param name property name. + @return property value as an array of Strings, + or null.]]> + + + + + + + name property as + an array of Strings. + If no such property is specified then default value is returned. + + @param name property name. + @param defaultValue The default value + @return property value as an array of Strings, + or default value.]]> + + + + + + name property as + a collection of Strings, trimmed of the leading and trailing whitespace. + If no such property is specified then empty Collection is returned. + + @param name property name. + @return property value as a collection of Strings, or empty Collection]]> + + + + + + name property as + an array of Strings, trimmed of the leading and trailing whitespace. + If no such property is specified then an empty array is returned. + + @param name property name. + @return property value as an array of trimmed Strings, + or empty array.]]> + + + + + + + name property as + an array of Strings, trimmed of the leading and trailing whitespace. + If no such property is specified then default value is returned. + + @param name property name. + @param defaultValue The default value + @return property value as an array of trimmed Strings, + or default value.]]> + + + + + + + name property as + as comma delimited values. + + @param name property name. + @param values The values]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + hostProperty as a + InetSocketAddress. If hostProperty is + null, addressProperty will be used. This + is useful for cases where we want to differentiate between host + bind address and address clients should use to establish connection. + + @param hostProperty bind host property name. + @param addressProperty address property name. + @param defaultAddressValue the default value + @param defaultPort the default port + @return InetSocketAddress]]> + + + + + + + + name property as a + InetSocketAddress. + @param name property name. + @param defaultAddress the default value + @param defaultPort the default port + @return InetSocketAddress]]> + + + + + + + name property as + a host:port.]]> + + + + + + + + + name property as a host:port. The wildcard + address is replaced with the local host's address. If the host and address + properties are configured the host component of the address will be combined + with the port component of the addr to generate the address. This is to allow + optional control over which host name is used in multi-home bind-host + cases where a host can have multiple names + @param hostProperty the bind-host configuration name + @param addressProperty the service address configuration name + @param defaultAddressValue the service default address configuration value + @param addr InetSocketAddress of the service listener + @return InetSocketAddress for clients to connect]]> + + + + + + + name property as a host:port. The wildcard + address is replaced with the local host's address. + @param name property name. + @param addr InetSocketAddress of a listener to store in the given property + @return InetSocketAddress for clients to connect]]> + + + + + + + + + + + + + + + + + + + + name property + as an array of Class. + The value of the property specifies a list of comma separated class names. 
+ If no such property is specified, then defaultValue is + returned. + + @param name the property name. + @param defaultValue default value. + @return property value as a Class[], + or defaultValue.]]> + + + + + + + name property as a Class. + If no such property is specified, then defaultValue is + returned. + + @param name the class name. + @param defaultValue default value. + @return property value as a Class, + or defaultValue.]]> + + + + + + + + name property as a Class + implementing the interface specified by xface. + + If no such property is specified, then defaultValue is + returned. + + An exception is thrown if the returned class does not implement the named + interface. + + @param name the class name. + @param defaultValue default value. + @param xface the interface implemented by the named class. + @return property value as a Class, + or defaultValue.]]> + + + + + + + name property as a List + of objects implementing the interface specified by xface. + + An exception is thrown if any of the classes does not exist, or if it does + not implement the named interface. + + @param name the property name. + @param xface the interface implemented by the classes named by + name. + @return a List of objects implementing xface.]]> + + + + + + + + name property to the name of a + theClass implementing the given interface xface. + + An exception is thrown if theClass does not implement the + interface xface. + + @param name property name. + @param theClass property value. + @param xface the interface implemented by the named class.]]> + + + + + + + + dirsProp with + the given path. If dirsProp contains multiple directories, + then one is chosen based on path's hash code. If the selected + directory does not exist, an attempt is made to create it. + + @param dirsProp directory in which to locate the file. + @param path file-path. + @return local file under the directory with the given path.]]> + + + + + + + + dirsProp with + the given path. If dirsProp contains multiple directories, + then one is chosen based on path's hash code. If the selected + directory does not exist, an attempt is made to create it. + + @param dirsProp directory in which to locate the file. + @param path file-path. + @return local file under the directory with the given path.]]> + + + + + + + + + + + + name. + + @param name configuration resource name. + @return an input stream attached to the resource.]]> + + + + + + name. + + @param name configuration resource name. + @return a reader attached to the resource.]]> + + + + + + + + + + + + + + + + + + + + + + String + key-value pairs in the configuration. + + @return an iterator over the entries.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + When property name is not empty and the property exists in the + configuration, this method writes the property and its attributes + to the {@link Writer}. + +
+ • When property name is null or empty, this method writes all the
+   configuration properties and their attributes to the {@link Writer}.
+ • When property name is not empty but the property doesn't exist in
+   the configuration, this method throws an {@link IllegalArgumentException}.
    + @param out the writer to write to.]]> + + + + + + + + + + When propertyName is not empty, and the property exists + in the configuration, the format of the output would be, +

    +  {
    +    "property": {
    +      "key" : "key1",
    +      "value" : "value1",
    +      "isFinal" : "key1.isFinal",
    +      "resource" : "key1.resource"
    +    }
    +  }
    +  
+ • When propertyName is null or empty, it behaves the same as
+   {@link #dumpConfiguration(Configuration, Writer)}; the output would be,
    +  { "properties" :
    +      [ { key : "key1",
    +          value : "value1",
    +          isFinal : "key1.isFinal",
    +          resource : "key1.resource" },
    +        { key : "key2",
    +          value : "value2",
+          isFinal : "key2.isFinal",
    +          resource : "key2.resource" }
    +       ]
    +   }
    +  
+ • When propertyName is not empty, and the property is not found in the
+   configuration, this method will throw an {@link IllegalArgumentException}.
    + @param config the configuration + @param propertyName property name + @param out the Writer to write to + @throws IOException + @throws IllegalArgumentException when property name is not + empty and the property is not found in configuration]]> + + + + + + + + + { "properties" : + [ { key : "key1", + value : "value1", + isFinal : "key1.isFinal", + resource : "key1.resource" }, + { key : "key2", + value : "value2", + isFinal : "ke2.isFinal", + resource : "key2.resource" } + ] + } + + + It does not output the properties of the configuration object which + is loaded from an input stream. +
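As a usage note on the dump methods documented here: a minimal sketch, assuming the static Configuration.dumpConfiguration overloads described above; the key name is hypothetical and the JSON layout is the one shown in this doc.

    import java.io.IOException;
    import java.io.StringWriter;
    import org.apache.hadoop.conf.Configuration;

    public class DumpConfigurationSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("my.example.key", "value1");          // hypothetical key

        // Dump every property as a single JSON document.
        StringWriter all = new StringWriter();
        Configuration.dumpConfiguration(conf, all);
        System.out.println(all);

        // Dump just one property (throws IllegalArgumentException if it is unknown).
        StringWriter one = new StringWriter();
        Configuration.dumpConfiguration(conf, "my.example.key", one);
        System.out.println(one);
      }
    }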
    + + @param config the configuration + @param out the Writer to write to + @throws IOException]]> + + + + + + + + + + + + + + + + + + + true to set quiet-mode on, false + to turn it off.]]> + + + + + + + + + + + + + + + + + + + + + with matching keys]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Resources + +

    Configurations are specified by resources. A resource contains a set of + name/value pairs as XML data. Each resource is named by either a + String or by a {@link Path}. If named by a String, + then the classpath is examined for a file with that name. If named by a + Path, then the local filesystem is examined directly, without + referring to the classpath. + +

    Unless explicitly turned off, Hadoop by default specifies two + resources, loaded in-order from the classpath:

      +
    1. + + core-default.xml: Read-only defaults for hadoop.
    2. +
    3. core-site.xml: Site-specific configuration for a given hadoop + installation.
    4. +
    + Applications may add additional resources, which are loaded + subsequent to these resources in the order they are added. + +

    Final Parameters

    + +

    Configuration parameters may be declared final. + Once a resource declares a value final, no subsequently-loaded + resource can alter that value. + For example, one might define a final parameter with: +

    +  <property>
    +    <name>dfs.hosts.include</name>
    +    <value>/etc/hadoop/conf/hosts.include</value>
    +    <final>true</final>
    +  </property>
    + + Administrators typically define parameters as final in + core-site.xml for values that user applications may not alter. + +

    Variable Expansion

    + +

    Value strings are first processed for variable expansion. The + available properties are:

      +
    1. Other properties defined in this Configuration; and, if a name is + undefined here,
    2. +
    3. Environment variables in {@link System#getenv()} if a name starts with + "env.", or
    4. +
    5. Properties in {@link System#getProperties()}.
    6. +
    + +

    For example, if a configuration resource contains the following property + definitions: +

    +  <property>
    +    <name>basedir</name>
    +    <value>/user/${user.name}</value>
    +  </property>
    +  
    +  <property>
    +    <name>tempdir</name>
    +    <value>${basedir}/tmp</value>
    +  </property>
    +
    +  <property>
    +    <name>otherdir</name>
    +    <value>${env.BASE_DIR}/other</value>
    +  </property>
    +  
    + +

    When conf.get("tempdir") is called, then ${basedir} + will be resolved to another property in this Configuration, while + ${user.name} would then ordinarily be resolved to the value + of the System property with that name. +

    When conf.get("otherdir") is called, then ${env.BASE_DIR} + will be resolved to the value of the ${BASE_DIR} environment variable. + It supports ${env.NAME:-default} and ${env.NAME-default} notations. + The former is resolved to "default" if ${NAME} environment variable is undefined + or its value is empty. + The latter behaves the same way only if ${NAME} is undefined. +

    By default, warnings will be given to any deprecated configuration + parameters and these are suppressible by configuring + log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in + log4j.properties file. + +

    Tags

    + +

Optionally we can tag related properties together by using tag + attributes. System tags are defined by the hadoop.tags.system property. Users + can define their own custom tags in the hadoop.tags.custom property. + +

    For example, we can tag existing property as: +

    +  <property>
    +    <name>dfs.replication</name>
    +    <value>3</value>
    +    <tag>HDFS,REQUIRED</tag>
    +  </property>
    +
    +  <property>
    +    <name>dfs.data.transfer.protection</name>
    +    <value>3</value>
    +    <tag>HDFS,SECURITY</tag>
    +  </property>
    + 
    +

    Properties marked with tags can be retrieved with conf + .getAllPropertiesByTag("HDFS") or conf.getAllPropertiesByTags + (Arrays.asList("YARN","SECURITY")).
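To make the behaviour described above concrete, here is a minimal sketch (not part of the patch) exercising resource loading, ${...} variable expansion, key deprecation and a couple of typed getters; the resource and key names are invented for illustration.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class ConfigurationSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();      // loads core-default.xml, then core-site.xml
        conf.addResource("my-site.xml");               // hypothetical extra resource, loaded last

        Configuration.addDeprecation("my.old.key", "my.new.key");  // old key becomes an alias

        conf.set("basedir", "/user/${user.name}");     // expanded against system properties
        conf.set("tempdir", "${basedir}/tmp");         // expanded against other properties
        System.out.println(conf.get("tempdir"));       // e.g. /user/alice/tmp

        int retries = conf.getInt("my.retries", 3);    // default used when the key is unset
        long timeoutSec = conf.getTimeDuration("my.timeout", 30, TimeUnit.SECONDS);
        System.out.println(retries + " " + timeoutSec);
      }
    }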

    ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This implementation generates the key material and calls the + {@link #createKey(String, byte[], Options)} method. + + @param name the base name of the key + @param options the options for the new key. + @return the version name of the first version of the key. + @throws IOException + @throws NoSuchAlgorithmException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This implementation generates the key material and calls the + {@link #rollNewVersion(String, byte[])} method. + + @param name the basename of the key + @return the name of the new version of the key + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + KeyProvider implementations must be thread safe.]]> + + + + + + + + + + + + + + + + + + + + + + NULL if + a provider for the specified URI scheme could not be found. + @throws IOException thrown if the provider failed to initialize.]]> + + + + + + + + + + + + + + + + + + + + + + uri has syntax error]]> + + + + + + + + + + + + + + + + uri is + not found]]> + + + + + + + + + + + + + + + + + + + + + + + uri + determines a configuration property name, + fs.AbstractFileSystem.scheme.impl whose value names the + AbstractFileSystem class. + + The entire URI and conf is passed to the AbstractFileSystem factory method. + + @param uri for the file system to be created. + @param conf which is passed to the file system impl. + + @return file system for the given URI. + + @throws UnsupportedFileSystemException if the file system for + uri is not supported.]]> + + + + + + + + + + + + default port;]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + In some FileSystem implementations such as HDFS metadata + synchronization is essential to guarantee consistency of read requests + particularly in HA setting. + @throws IOException + @throws UnsupportedOperationException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + describing modifications + @throws IOException if an ACL could not be modified]]> + + + + + + + + describing entries to remove + @throws IOException if an ACL could not be modified]]> + + + + + + + + + + + + + + + + + + + + + + describing modifications, must include entries + for user, group, and others for compatibility with permission bits. + @throws IOException if an ACL could not be modified]]> + + + + + + + which returns each AclStatus + @throws IOException if an ACL could not be read]]> + + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to modify + @param name xattr name. 
+ @param value xattr value. + @throws IOException]]> + + + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to modify + @param name xattr name. + @param value xattr value. + @param flag xattr set flag + @throws IOException]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attribute + @param name xattr name. + @return byte[] xattr value. + @throws IOException]]> + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @return Map describing the XAttrs of the file or directory + @throws IOException]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @param names XAttr names. + @return Map describing the XAttrs of the file or directory + @throws IOException]]> + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @return Map describing the XAttrs of the file or directory + @throws IOException]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to remove extended attribute + @param name xattr name + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BlockLocation(offset: 0, length: BLOCK_SIZE, + hosts: {"host1:9866", "host2:9866, host3:9866"}) + + + And if the file is erasure-coded, each BlockLocation represents a logical + block groups. Value offset is the offset of a block group in the file and + value length is the total length of a block group. Hosts of a BlockLocation + are the datanodes that holding all the data blocks and parity blocks of a + block group. + Suppose we have a RS_3_2 coded file (3 data units and 2 parity units). + A BlockLocation example will be like: +
    + BlockLocation(offset: 0, length: 3 * BLOCK_SIZE, hosts: {"host1:9866",
    +   "host2:9866","host3:9866","host4:9866","host5:9866"})
    + 
    + + Please refer to + {@link FileSystem#getFileBlockLocations(FileStatus, long, long)} or + {@link FileContext#getFileBlockLocations(Path, long, long)} + for more examples.]]> +
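A short sketch of how the BlockLocation values described above are obtained through FileSystem#getFileBlockLocations; the path is hypothetical and the offsets and hosts depend entirely on the cluster.

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus stat = fs.getFileStatus(new Path("/data/example.bin")); // hypothetical file
        // One BlockLocation per block, or per block group for erasure-coded files.
        for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
          System.out.println(loc.getOffset() + "+" + loc.getLength()
              + " on " + Arrays.toString(loc.getHosts()));
        }
      }
    }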
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + After a successful call, buf.position() will be advanced by the number + of bytes read and buf.limit() should be unchanged. +

    + In the case of an exception, the values of buf.position() and buf.limit() + are undefined, and callers should be prepared to recover from this + eventuality. +

    + Many implementations will throw {@link UnsupportedOperationException}, so + callers that are not confident in support for this method from the + underlying filesystem should be prepared to handle that exception. +

    + Implementations should treat 0-length requests as legitimate, and must not + signal an error upon their receipt. + + @param buf + the ByteBuffer to receive the results of the read operation. + @return the number of bytes read, possibly zero, or -1 if + reach end-of-stream + @throws IOException + if there is some error performing the read]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + setReplication of FileSystem + @param src file name + @param replication new replication + @throws IOException + @return true if successful; + false if file does not exist or is a directory]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + 
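Returning to the read(ByteBuffer) contract spelled out a little earlier on this page: a minimal read-loop sketch that also handles streams which do not support it; the path is hypothetical.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ByteBufferReadSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        ByteBuffer buf = ByteBuffer.allocate(8192);
        try (FSDataInputStream in = fs.open(new Path("/data/example.bin"))) { // hypothetical
          int n;
          while ((n = in.read(buf)) > 0) {   // each call advances buf.position() by n
            buf.flip();
            // ... consume the bytes in buf here ...
            buf.clear();
          }
        } catch (UnsupportedOperationException e) {
          // The underlying stream is not ByteBufferReadable; fall back to byte[] reads.
        }
      }
    }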
core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + core-default.xml]]> + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + core-default.xml]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND) + +

    + + Use the CreateFlag as follows: +

      +
    1. CREATE - to create a file if it does not exist, + else throw FileAlreadyExists.
    2. +
    3. APPEND - to append to a file if it exists, + else throw FileNotFoundException.
    4. +
    5. OVERWRITE - to truncate a file if it exists, + else throw FileNotFoundException.
    6. +
    7. CREATE|APPEND - to create a file if it does not exist, + else append to an existing file.
    8. +
    9. CREATE|OVERWRITE - to create a file if it does not exist, + else overwrite an existing file.
    10. +
    11. SYNC_BLOCK - to force closed blocks to the disk device. + In addition {@link Syncable#hsync()} should be called after each write, + if true synchronous behavior is required.
    12. +
    13. LAZY_PERSIST - Create the block on transient storage (RAM) if + available.
    14. +
    15. APPEND_NEWBLOCK - Append data to a new block instead of end of the last + partial block.
    16. +
    + + Following combinations are not valid and will result in + {@link HadoopIllegalArgumentException}: +
      +
    1. APPEND|OVERWRITE
    2. +
    3. CREATE|APPEND|OVERWRITE
    4. +
    ]]> +
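To make the flag combinations above concrete, a minimal sketch using FileContext#create with an EnumSet of CreateFlag (CREATE|APPEND: create the file if absent, otherwise append); the path is hypothetical.

    import java.util.EnumSet;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Options;
    import org.apache.hadoop.fs.Path;

    public class CreateFlagSketch {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();
        EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND);
        try (FSDataOutputStream out = fc.create(new Path("/tmp/example.log"), // hypothetical
            flags, Options.CreateOpts.createParent())) {
          out.writeBytes("hello\n");
        }
        // Invalid combinations such as APPEND|OVERWRITE are rejected with
        // HadoopIllegalArgumentException before any I/O happens.
      }
    }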
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + absOrFqPath is not supported. + @throws IOException If the file system for absOrFqPath could + not be instantiated.]]> + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + defaultFsUri is not supported]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + NewWdir can be one of: +
      +
    • relative path: "foo/bar";
    • +
    • absolute without scheme: "/foo/bar"
    • +
    • fully qualified with scheme: "xx://auth/foo/bar"
    • +
    +
    + Illegal WDs: +
      +
    • relative with scheme: "xx:foo/bar"
    • +
    • non existent directory
    • +
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + f does not exist + @throws AccessControlException if access denied + @throws IOException If an IO Error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server + + RuntimeExceptions: + @throws InvalidPathException If path f is not valid]]> + + + + + + + + + + + + + + + + + + + + +
  • Progress - to report progress on the operation - default null +
  • Permission - umask is applied against permission: default is + FsPermissions:getDefault() + +
• CreateParent - create missing parent path; default is not + to create parents +
  • The defaults for the following are SS defaults of the file + server implementing the target path. Not all parameters make sense + for all kinds of file system - eg. localFS ignores Blocksize, + replication, checksum +
      +
    • BufferSize - buffersize used in FSDataOutputStream +
    • Blocksize - block size for file blocks +
    • ReplicationFactor - replication for blocks +
    • ChecksumParam - Checksum parameters. server default is used + if not specified. +
    + + + @return {@link FSDataOutputStream} for created file + + @throws AccessControlException If access is denied + @throws FileAlreadyExistsException If file f already exists + @throws FileNotFoundException If parent of f does not exist + and createParent is false + @throws ParentNotDirectoryException If parent of f is not a + directory. + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server + + RuntimeExceptions: + @throws InvalidPathException If path f is not valid]]> + + + + + + + + + + + + + + + + + + + + + dir already + exists + @throws FileNotFoundException If parent of dir does not exist + and createParent is false + @throws ParentNotDirectoryException If parent of dir is not a + directory + @throws UnsupportedFileSystemException If file system for dir + is not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server + + RuntimeExceptions: + @throws InvalidPathException If path dir is not valid]]> + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server + + RuntimeExceptions: + @throws InvalidPathException If path f is invalid]]> + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f + is not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + +
  • Fails if path is a directory. +
  • Fails if path does not exist. +
  • Fails if path is not closed. +
  • Fails if new size is greater than current size. + + @param f The path to the file to be truncated + @param newLength The size the file is to be truncated to + + @return true if the file has been truncated to the desired + newLength and is immediately available to be reused for + write operations such as append, or + false if a background process of adjusting the length of + the last block has been started, and clients should wait for it to + complete before proceeding with further file updates. + + @throws AccessControlException If access is denied + @throws FileNotFoundException If file f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + f does not exist + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + + + +
  • Fails if src is a file and dst is a directory. +
  • Fails if src is a directory and dst is a file. +
  • Fails if the parent of dst does not exist or is a file. + +

    + If OVERWRITE option is not passed as an argument, rename fails if the dst + already exists. +

    + If OVERWRITE option is passed as an argument, rename overwrites the dst if + it is a file or an empty directory. Rename fails if dst is a non-empty + directory. +

    + Note that atomicity of rename is dependent on the file system + implementation. Please refer to the file system documentation for details +

    + + @param src path to be renamed + @param dst new path after rename + + @throws AccessControlException If access is denied + @throws FileAlreadyExistsException If dst already exists and + options has {@link Options.Rename#OVERWRITE} + option false. + @throws FileNotFoundException If src does not exist + @throws ParentNotDirectoryException If parent of dst is not a + directory + @throws UnsupportedFileSystemException If file system for src + and dst is not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f + is not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server + + RuntimeExceptions: + @throws HadoopIllegalArgumentException If username or + groupname is invalid.]]> + + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + f does not exist + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client 
+ @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred]]> + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If the given path does not refer to a symlink + or an I/O error occurred]]> + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + + + + Given a path referring to a symlink of form: + + <---X---> + fs://host/A/B/link + <-----Y-----> + + In this path X is the scheme and authority that identify the file system, + and Y is the path leading up to the final path component "link". If Y is + a symlink itself then let Y' be the target of Y and X' be the scheme and + authority of Y'. Symlink targets may: + + 1. Fully qualified URIs + + fs://hostX/A/B/file Resolved according to the target file system. + + 2. Partially qualified URIs (eg scheme but no host) + + fs:///A/B/file Resolved according to the target file system. Eg resolving + a symlink to hdfs:///A results in an exception because + HDFS URIs must be fully qualified, while a symlink to + file:///A will not since Hadoop's local file systems + require partially qualified URIs. + + 3. Relative paths + + path Resolves to [Y'][path]. Eg if Y resolves to hdfs://host/A and path + is "../B/file" then [Y'][path] is hdfs://host/B/file + + 4. Absolute paths + + path Resolves to [X'][path]. Eg if Y resolves hdfs://host/A/B and path + is "/file" then [X][path] is hdfs://host/file + + + @param target the target of the symbolic link + @param link the path to be created that points to target + @param createParent if true then missing parent dirs are created if + false then parent must exist + + + @throws AccessControlException If access is denied + @throws FileAlreadyExistsException If file linkcode> already exists + @throws FileNotFoundException If target does not exist + @throws ParentNotDirectoryException If parent of link is not a + directory. 
+ @throws UnsupportedFileSystemException If file system for + target or link is not supported + @throws IOException If an I/O error occurred]]> + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + + + + + + f does not exist + @throws UnsupportedFileSystemException If file system for f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + f is + not supported + @throws IOException If an I/O error occurred + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + describing modifications + @throws IOException if an ACL could not be modified]]> + + + + + + + + describing entries to remove + @throws IOException if an ACL could not be modified]]> + + + + + + + + + + + + + + + + + + + + + + describing modifications, must include entries + for user, group, and others for compatibility with permission bits. + @throws IOException if an ACL could not be modified]]> + + + + + + + which returns each AclStatus + @throws IOException if an ACL could not be read]]> + + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to modify + @param name xattr name. + @param value xattr value. + @throws IOException]]> + + + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to modify + @param name xattr name. + @param value xattr value. + @param flag xattr set flag + @throws IOException]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attribute + @param name xattr name. + @return byte[] xattr value. + @throws IOException]]> + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @return Map describing the XAttrs of the file or directory + @throws IOException]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @param names XAttr names. + @return Map describing the XAttrs of the file or directory + @throws IOException]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to remove extended attribute + @param name xattr name + @throws IOException]]> + + + + + + + + Refer to the HDFS extended attributes user documentation for details. 
+ + @param path Path to get extended attributes + @return List of the XAttr names of the file or directory + @throws IOException]]> + + + + + + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + Exceptions applicable to file systems accessed over RPC: + @throws RpcClientException If an exception occurred in the RPC client + @throws RpcServerException If an exception occurred in the RPC server + @throws UnexpectedServerException If server implementation throws + undeclared exception to RPC server]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Path Names + + The Hadoop file system supports a URI namespace and URI names. This enables + multiple types of file systems to be referenced using fully-qualified URIs. + Two common Hadoop file system implementations are +

      +
    • the local file system: file:///path +
    • the HDFS file system: hdfs://nnAddress:nnPort/path +
    + + The Hadoop file system also supports additional naming schemes besides URIs. + Hadoop has the concept of a default file system, which implies a + default URI scheme and authority. This enables slash-relative names + relative to the default FS, which are more convenient for users and + application writers. The default FS is typically set by the user's + environment, though it can also be manually specified. +

    + + Hadoop also supports working-directory-relative names, which are paths + relative to the current working directory (similar to Unix). The working + directory can be in a different file system than the default FS. +

    + Thus, Hadoop path names can be specified as one of the following: +

      +
    • a fully-qualified URI: scheme://authority/path (e.g. + hdfs://nnAddress:nnPort/foo/bar) +
    • a slash-relative name: path relative to the default file system (e.g. + /foo/bar) +
    • a working-directory-relative name: path relative to the working dir (e.g. + foo/bar) +
    + Relative paths with scheme (scheme:foo/bar) are illegal. + +
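+ For illustration only (the namenode address and paths below are invented),
+ the three legal forms can be constructed as:
+
+     Path fullyQualified = new Path("hdfs://namenode:8020/foo/bar");
+     Path slashRelative  = new Path("/foo/bar");  // resolved against the default FS
+     Path wdRelative     = new Path("foo/bar");   // resolved against the working directory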

    Role of FileContext and Configuration Defaults

    + + The FileContext is the analogue of per-process file-related state in Unix. It + contains two properties: + +
      +
    • the default file system (for resolving slash-relative names) +
    • the umask (for file permissions) +
    + In general, these properties are obtained from the default configuration file + in the user's environment (see {@link Configuration}). + + Further file system properties are specified on the server-side. File system + operations default to using these server-side defaults unless otherwise + specified. +

    + The file system related server-side defaults are: +

      +
    • the home directory (default is "/user/userName")
    • the initial working directory (only for the local file system)
    • replication factor
    • block size
    • buffer size
    • encryptDataTransfer
    • checksum option (checksumType and bytesPerChecksum)
    + +

    Example Usage

    + + Example 1: use the default config read from the $HADOOP_CONFIG/core.xml. + Unspecified values come from core-defaults.xml in the release jar. +
      +
    • myFContext = FileContext.getFileContext(); // uses the default config + // which has your default FS +
    • myFContext.create(path, ...); +
    • myFContext.setWorkingDir(path); +
    • myFContext.open (path, ...); +
    • ... +
    + Example 2: Get a FileContext with a specific URI as the default FS +
      +
    • myFContext = FileContext.getFileContext(URI); +
    • myFContext.create(path, ...); +
    • ... +
    + Example 3: FileContext with local file system as the default +
      +
    • myFContext = FileContext.getLocalFSFileContext(); +
    • myFContext.create(path, ...); +
    • ... +
    + Example 4: Use a specific config, ignoring $HADOOP_CONFIG + Generally you should not need use a config unless you are doing +
      +
    • configX = someConfigSomeOnePassedToYou; +
    • myFContext = getFileContext(configX); // configX is not changed, + // is passed down +
    • myFContext.create(path, ...); +
    • ... +
    ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This implementation throws an UnsupportedOperationException. + + @return the protocol scheme for this FileSystem. + @throws UnsupportedOperationException if the operation is unsupported + (default).]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  • If the configuration has the property
    {@code "fs.$SCHEME.impl.disable.cache"} set to true,
    a new instance will be created, initialized with the supplied URI and
    configuration, then returned without being cached.
  • If there is a cached FS instance matching the same URI, it will
    be returned.
  • Otherwise: a new FS instance will be created, initialized with the
    configuration and URI, cached and returned to the caller.

  @throws IOException if the FileSystem cannot be instantiated.]]>
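+ A minimal sketch of this behaviour (the scheme, authority and property name
+ below follow the {@code fs.$SCHEME.impl.disable.cache} pattern and are
+ illustrative only):
+
+     Configuration conf = new Configuration();
+     // First call creates and caches the instance; later calls return it.
+     FileSystem shared = FileSystem.get(URI.create("hdfs://nn1:8020/"), conf);
+
+     // Disabling the cache for the scheme yields a private, uncached instance.
+     conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+     FileSystem privateFs = FileSystem.get(URI.create("hdfs://nn1:8020/"), conf);
+     privateFs.close();   // safe to close; it is not shared with other callers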
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + if f == null : + result = null + elif f.getLen() <= start: + result = [] + else result = [ locations(FS, b) for b in blocks(FS, p, s, s+l)] + + This call is most helpful with and distributed filesystem + where the hostnames of machines that contain blocks of the given file + can be determined. + + The default implementation returns an array containing one element: +
    + BlockLocation( { "localhost:9866" },  { "localhost" }, 0, file.getLen())
    + 
    + + In HDFS, if file is three-replicated, the returned array contains + elements like: +
    + BlockLocation(offset: 0, length: BLOCK_SIZE,
+   hosts: {"host1:9866", "host2:9866", "host3:9866"})
    + BlockLocation(offset: BLOCK_SIZE, length: BLOCK_SIZE,
+   hosts: {"host2:9866", "host3:9866", "host4:9866"})
    + 
+ And if a file is erasure-coded, the returned BlockLocations are logical
+ block groups.
+
+ Suppose we have a RS_3_2 coded file (3 data units and 2 parity units).
+ 1. If the file size is less than one stripe size, say 2 * CELL_SIZE, then
+ there will be one BlockLocation returned, with 0 offset, actual file size
+ and 4 hosts (2 data blocks and 2 parity blocks) hosting the actual blocks.
+ 2. If the file size is less than one group size but greater than one
+ stripe size, then there will be one BlockLocation returned, with 0 offset,
+ actual file size with 5 hosts (3 data blocks and 2 parity blocks) hosting
+ the actual blocks.
+ 3. If the file size is greater than one group size, 3 * BLOCK_SIZE + 123
+ for example, then the result will be like:
    + BlockLocation(offset: 0, length: 3 * BLOCK_SIZE, hosts: {"host1:9866",
    +   "host2:9866","host3:9866","host4:9866","host5:9866"})
    + BlockLocation(offset: 3 * BLOCK_SIZE, length: 123, hosts: {"host1:9866",
    +   "host4:9866", "host5:9866"})
    + 
+ @param file FileStatus to get data from
+ @param start offset into the given file
+ @param len the length of the range for which to get locations
+ @throws IOException IO failure]]>
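+ As a usage sketch (the path, and an already-initialised {@code FileSystem fs},
+ are assumed for illustration):
+
+     FileStatus status = fs.getFileStatus(new Path("/data/part-00000"));
+     BlockLocation[] blocks =
+         fs.getFileBlockLocations(status, 0, status.getLen());
+     for (BlockLocation block : blocks) {
+       System.out.println("offset=" + block.getOffset()
+           + " length=" + block.getLength()
+           + " hosts=" + Arrays.toString(block.getHosts()));
+     }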
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Important: the default implementation is not atomic + @param f path to use for create + @throws IOException IO failure]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  • Fails if src is a file and dst is a directory.
  • Fails if src is a directory and dst is a file.
  • Fails if the parent of dst does not exist or is a file.

    + If OVERWRITE option is not passed as an argument, rename fails + if the dst already exists. +

    + If OVERWRITE option is passed as an argument, rename overwrites + the dst if it is a file or an empty directory. Rename fails if dst is + a non-empty directory. +

    + Note that atomicity of rename is dependent on the file system + implementation. Please refer to the file system documentation for + details. This default implementation is non atomic. +

    + This method is deprecated since it is a temporary method added to + support the transition from FileSystem to FileContext for user + applications. + + @param src path to be renamed + @param dst new path after rename + @throws FileNotFoundException src path does not exist, or the parent + path of dst does not exist. + @throws FileAlreadyExistsException dest path exists and is a file + @throws ParentNotDirectoryException if the parent path of dest is not + a directory + @throws IOException on failure]]> + + + + + + + + +
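+ A hedged sketch of the equivalent, non-deprecated call on FileContext
+ (the paths are illustrative):
+
+     FileContext fc = FileContext.getFileContext();
+     // Overwrites dst only if it is a file or an empty directory;
+     // the rename fails if dst is a non-empty directory.
+     fc.rename(new Path("/tmp/staging/report.csv"),
+               new Path("/data/report.csv"),
+               Options.Rename.OVERWRITE);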

  • Fails if path is a directory.
  • Fails if path does not exist.
  • Fails if path is not closed.
  • Fails if new size is greater than current size.
  • + + @param f The path to the file to be truncated + @param newLength The size the file is to be truncated to + + @return true if the file has been truncated to the desired + newLength and is immediately available to be reused for + write operations such as append, or + false if a background process of adjusting the length of + the last block has been started, and clients should wait for it to + complete before proceeding with further file updates. + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default).]]> +
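+ For example (the path and length are illustrative and {@code fs} is assumed
+ initialised); note the meaning of the boolean result described above:
+
+     boolean done = fs.truncate(new Path("/data/events.log"), 1024L);
+     if (!done) {
+       // Recovery of the last block is still in progress; wait for it to
+       // complete before appending to the file again.
+     }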
    +
    + + + + + + + + + + + + + + + + + + + + +
  • Clean shutdown of the JVM cannot be guaranteed.
  • The time to shut down a FileSystem depends on the number of files to
    delete. For filesystems where the cost of checking for the existence of
    a file/directory and the actual delete operation (for example: object
    stores) is high, the time to shut down the JVM can be significantly
    extended by over-use of this feature.
  • Connectivity problems with a remote filesystem may delay shutdown
    further, and may cause the files to not be deleted.
  • + + @param f the path to delete. + @return true if deleteOnExit is successful, otherwise false. + @throws IOException IO failure]]> +
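+ For example (the scratch path is illustrative):
+
+     Path scratch = new Path("/tmp/job-scratch");
+     fs.mkdirs(scratch);
+     // Best-effort cleanup when the JVM shuts down; see the caveats above.
+     fs.deleteOnExit(scratch);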
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Does not guarantee to return the List of files/directories status in a + sorted order. +

+ Will not return null. Expect IOException upon access error.
+ @param f given path
+ @return the statuses of the files/directories in the given path
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>

    + A filename pattern is composed of regular characters and + special pattern matching characters, which are: + +

    +
    +
    +

    +

    ? +
    Matches any single character. + +

    +

    * +
    Matches zero or more characters. + +

    +

    [abc] +
    Matches a single character from character set + {a,b,c}. + +

    +

    [a-b] +
    Matches a single character from the character range + {a...b}. Note that character a must be + lexicographically less than or equal to character b. + +

    +

    [^a] +
    Matches a single character that is not from character set or range + {a}. Note that the ^ character must occur + immediately to the right of the opening bracket. + +

    +

    \c +
    Removes (escapes) any special meaning of character c. + +

    +

    {ab,cd} +
    Matches a string from the string set {ab, cd} + +

    +

    {ab,c{de,fh}} +
    Matches a string from the string set {ab, cde, cfh} + +
    +
    +
    + + @param pathPattern a glob specifying a path pattern + + @return an array of paths that match the path pattern + @throws IOException IO failure]]> +
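+ A short sketch of the pattern syntax in use (the directory layout is
+ hypothetical and {@code fs} is assumed initialised):
+
+     // Matches e.g. /logs/2024-01-01/app.gz and /logs/2024-01-02/web.gz
+     FileStatus[] matches =
+         fs.globStatus(new Path("/logs/2024-*/{app,web}.gz"));
+     for (FileStatus match : matches) {
+       System.out.println(match.getPath());
+     }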
    +
    + + + + + + + + + + + + + + f does not exist + @throws IOException If an I/O error occurred]]> + + + + + + + + + f does not exist + @throws IOException if any I/O error occurred]]> + + + + + + + + p does not exist + @throws IOException if any I/O error occurred]]> + + + + + + + + + + If the path is a directory, + if recursive is false, returns files in the directory; + if recursive is true, return files in the subtree rooted at the path. + If the path is a file, return the file's status and block locations. + + @param f is the path + @param recursive if the subdirectories need to be traversed recursively + + @return an iterator that traverses statuses of the files + + @throws FileNotFoundException when the path does not exist; + @throws IOException see specific implementation]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + undefined. + @throws IOException IO failure]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + In some FileSystem implementations such as HDFS metadata + synchronization is essential to guarantee consistency of read requests + particularly in HA setting. + @throws IOException + @throws UnsupportedOperationException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + describing modifications + @throws IOException if an ACL could not be modified + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to modify + @param name xattr name. + @param value xattr value. + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to modify + @param name xattr name. + @param value xattr value. + @param flag xattr set flag + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attribute + @param name xattr name. + @return byte[] xattr value. + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @return Map describing the XAttrs of the file or directory + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @param names XAttr names. 
+ @return Map describing the XAttrs of the file or directory + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to get extended attributes + @return List of the XAttr names of the file or directory + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + + Refer to the HDFS extended attributes user documentation for details. + + @param path Path to remove extended attribute + @param name xattr name + @throws IOException IO failure + @throws UnsupportedOperationException if the operation is unsupported + (default outcome).]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This is a default method which is intended to be overridden by + subclasses. The default implementation returns an empty storage statistics + object.

    + + @return The StorageStatistics for this FileSystem instance. + Will never be null.]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + All user code that may potentially use the Hadoop Distributed + File System should be written to use a FileSystem object or its + successor, {@link FileContext}. + +

    + The local implementation is {@link LocalFileSystem} and distributed + implementation is DistributedFileSystem. There are other implementations + for object stores and (outside the Apache Hadoop codebase), + third party filesystems. +

    + Notes +

      +
    1. The behaviour of the filesystem is specified in the Hadoop
       documentation. However, the normative specification of the behavior of
       this class is actually HDFS: if HDFS does not behave the way these
       Javadocs or the specification in the Hadoop documentation define,
       assume that the documentation is incorrect.
    2. The term {@code FileSystem} refers to an instance of this class.
    3. The acronym "FS" is used as an abbreviation of FileSystem.
    4. The term {@code filesystem} refers to the distributed/local filesystem
       itself, rather than the class used to interact with it.
    5. The term "file" refers to a file in the remote filesystem, rather than
       instances of {@code java.io.File}.
    ]]> +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + caller's environment variables to use + for expansion + @return String[] with absolute path to new jar in position 0 and + unexpanded wild card entry path in position 1 + @throws IOException if there is an I/O error while writing the jar file]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FilterFileSystem contains + some other file system, which it uses as + its basic file system, possibly transforming + the data along the way or providing additional + functionality. The class FilterFileSystem + itself simply overrides all methods of + FileSystem with versions that + pass all requests to the contained file + system. Subclasses of FilterFileSystem + may further override some of these methods + and may also provide additional methods + and fields.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -1 + if there is no more data because the end of the stream has been + reached]]> + + + + + + + + + + length bytes have been read. + + @param position position in the input stream to seek + @param buffer buffer into which data is read + @param offset offset into the buffer in which data is written + @param length the number of bytes to read + @throws IOException IO problems + @throws EOFException If the end of stream is reached while reading. 
+ If an exception is thrown an undetermined number + of bytes in the buffer may have been written.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + // Don't + if (fs instanceof FooFileSystem) { + FooFileSystem fs = (FooFileSystem) fs; + OutputStream out = dfs.createFile(path) + .optionA() + .optionB("value") + .cache() + .build() + } else if (fs instanceof BarFileSystem) { + ... + } + + // Do + OutputStream out = fs.createFile(path) + .permission(perm) + .bufferSize(bufSize) + .opt("foofs:option.a", true) + .opt("foofs:option.b", "value") + .opt("barfs:cache", true) + .must("foofs:cache", true) + .must("barfs:cache-size", 256 * 1024 * 1024) + .build(); + + + If the option is not related to the file system, the option will be ignored. + If the option is must, but not supported by the file system, a + {@link IllegalArgumentException} will be thrown.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + path is invalid]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @return file]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + and the scheme is null, and the authority + is null. + + @return whether the path is absolute and the URI has no scheme nor + authority parts]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if and only if pathname + should be included]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Warning: Not all filesystems satisfy the thread-safety requirement. + @param position position within file + @param buffer destination buffer + @param offset offset in the buffer + @param length number of bytes to read + @return actual number of bytes read; -1 means "none" + @throws IOException IO problems.]]> + + + + + + + + + + Warning: Not all filesystems satisfy the thread-safety requirement. + @param position position within file + @param buffer destination buffer + @param offset offset in the buffer + @param length number of bytes to read + @throws IOException IO problems. 
+ @throws EOFException the end of the data was reached before + the read operation completed]]> + + + + + + + + Warning: Not all filesystems satisfy the thread-safety requirement. + @param position position within file + @param buffer destination buffer + @throws IOException IO problems. + @throws EOFException the end of the data was reached before + the read operation completed]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <----15----> <----15----> <----15----> <-------18-------> + QUOTA REMAINING_QUATA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Note: Returned list is not sorted in any given order, + due to reliance on Java's {@link File#list()} API.)]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + XAttr is byte[], this class is to + covert byte[] to some kind of string representation or convert back. + String representation is convenient for display and input. For example + display in screen as shell response and json response, input as http + or shell parameter.]]> + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + @return ftp]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A {@link FileSystem} backed by an FTP client provided by Apache Commons Net. +

    ]]> +
    +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Since these methods are often vendor- or device-specific, operators + may implement this interface in order to achieve fencing. +

    + Fencing is configured by the operator as an ordered list of methods to + attempt. Each method will be tried in turn, and the next in the list + will only be attempted if the previous one fails. See {@link NodeFencer} + for more information. +

    + If an implementation also implements {@link Configurable} then its + setConf method will be called upon instantiation.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + state (e.g ACTIVE/STANDBY) as well as + some additional information. + + @throws AccessControlException + if access is denied. + @throws IOException + if other errors happen + @see HAServiceStatus]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + hadoop.http.filter.initializers. + +

      +
    • StaticUserWebFilter - An authorization plugin that makes all
      users a static configured user.
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + public class IntArrayWritable extends ArrayWritable { + public IntArrayWritable() { + super(IntWritable.class); + } + } + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a ByteWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the class of the item + @param conf the configuration to store + @param item the object to be stored + @param keyName the name of the key to use + @throws IOException : forwards Exceptions from the underlying + {@link Serialization} classes.]]> + + + + + + + + + the class of the item + @param conf the configuration to use + @param keyName the name of the key to use + @param itemClass the class of the item + @return restored object + @throws IOException : forwards Exceptions from the underlying + {@link Serialization} classes.]]> + + + + + + + + + the class of the item + @param conf the configuration to use + @param items the objects to be stored + @param keyName the name of the key to use + @throws IndexOutOfBoundsException if the items array is empty + @throws IOException : forwards Exceptions from the underlying + {@link Serialization} classes.]]> + + + + + + + + + the class of the item + @param conf the configuration to use + @param keyName the name of the key to use + @param itemClass the class of the item + @return restored object + @throws IOException : forwards Exceptions from the underlying + {@link Serialization} classes.]]> + + + + + DefaultStringifier offers convenience methods to store/load objects to/from + the configuration. + + @param the class of the objects to stringify]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a DoubleWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + value argument is null or + its size is zero, the elementType argument must not be null. If + the argument value's size is bigger than zero, the argument + elementType is not be used. + + @param value + @param elementType]]> + + + + + value should not be null + or empty. + + @param value]]> + + + + + + + + + + + + + + value and elementType. If the value argument + is null or its size is zero, the elementType argument must not be + null. If the argument value's size is bigger than zero, the + argument elementType is not be used. 
+ + @param value + @param elementType]]> + + + + + + + + + + + + + + + + + + + o is an EnumSetWritable with the same value, + or both are null.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a FloatWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + When two sequence files, which have same Key type but different Value + types, are mapped out to reduce, multiple Value types is not allowed. + In this case, this class can help you wrap instances with different types. +

    + +

+ Compared with ObjectWritable, this class is much more efficient,
+ because ObjectWritable will append the class declaration as a String
+ into the output file in every Key-Value pair.

    + +

    + Generic Writable implements {@link Configurable} interface, so that it will be + configured by the framework. The configuration is passed to the wrapped objects + implementing {@link Configurable} interface before deserialization. +

    + + how to use it:
    + 1. Write your own class, such as GenericObject, which extends GenericWritable.
+ 2. Implement the abstract method getTypes(), which defines
+    the classes that will be wrapped in GenericObject in the application.
+    Attention: the classes defined in the getTypes() method must
+    implement the Writable interface.

    + + The code looks like this: +
    + public class GenericObject extends GenericWritable {
    + 
    +   private static Class[] CLASSES = {
    +               ClassType1.class, 
    +               ClassType2.class,
    +               ClassType3.class,
    +               };
    +
    +   protected Class[] getTypes() {
    +       return CLASSES;
    +   }
    +
    + }
    + 
    + + @since Nov 8, 2006]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a IntWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + closes the input and output streams + at the end. + + @param in InputStrem to read from + @param out OutputStream to write to + @param conf the Configuration object]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ignore any {@link Throwable} or + null pointers. Must only be used for cleanup in exception handlers. + + @param log the log to record problems to at debug level. Can be null. + @param closeables the objects to close + @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)} + instead]]> + + + + + + + ignore any {@link Throwable} or + null pointers. Must only be used for cleanup in exception handlers. + + @param logger the log to record problems to at debug level. Can be null. + @param closeables the objects to close]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This is better than File#listDir because it does not ignore IOExceptions. + + @param dir The directory to list. + @param filter If non-null, the filter to use when listing + this directory. + @return The list of files in the directory. + + @throws IOException On I/O error]]> + + + + + + + + Borrowed from Uwe Schindler in LUCENE-5588 + @param fileToSync the file to fsync]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a LongWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A map is a directory containing two files, the data file, + containing all keys and values in the map, and a smaller index + file, containing a fraction of the keys. The fraction is determined by + {@link Writer#getIndexInterval()}. + +

    The index file is read entirely into memory. Thus key implementations + should try to keep themselves small. + +

    Map files are created by adding entries in-order. To maintain a large + database, perform updates by copying the previous version of a database and + merging in a sorted change list, to create a new version of the database in + a new file. Sorting large change lists can be done with {@link + SequenceFile.Sorter}.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is an MD5Hash whose digest contains the + same values.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + className by first finding + it in the specified conf. If the specified conf is null, + try load it directly.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A {@link Comparator} that operates directly on byte representations of + objects. +

    + @param + @see DeserializerComparator]]> +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFiles are flat files consisting of binary key/value + pairs. + +

    SequenceFile provides {@link SequenceFile.Writer}, + {@link SequenceFile.Reader} and {@link Sorter} classes for writing, + reading and sorting respectively.

    + + There are three SequenceFile Writers based on the + {@link CompressionType} used to compress key/value pairs: +
      +
    1. Writer : Uncompressed records.
    2. RecordCompressWriter : Record-compressed files, only compress values.
    3. BlockCompressWriter : Block-compressed files, both keys & values are
       collected in 'blocks' separately and compressed. The size of the
       'block' is configurable.
    + +

    The actual compression algorithm used to compress key and/or values can be + specified by using the appropriate {@link CompressionCodec}.

    + +

    The recommended way is to use the static createWriter methods
    provided by the SequenceFile to choose the preferred format.

    + +

    The {@link SequenceFile.Reader} acts as the bridge and can read any of the + above SequenceFile formats.

    + +

    SequenceFile Formats

    + +

    Essentially there are 3 different formats for SequenceFiles + depending on the CompressionType specified. All of them share a + common header described below. + +

    +
      +
      • version - 3 bytes of magic header SEQ, followed by 1 byte of actual
        version number (e.g. SEQ4 or SEQ6)
      • keyClassName - key class
      • valueClassName - value class
      • compression - A boolean which specifies if compression is turned on
        for keys/values in this file.
      • blockCompression - A boolean which specifies if block-compression is
        turned on for keys/values in this file.
      • compression codec - CompressionCodec class which is used for
        compression of keys and/or values (if compression is enabled).
      • metadata - {@link Metadata} for this file.
      • sync - A sync marker to denote end of the header.

    Uncompressed SequenceFile Format
      • Header
      • Record
          • Record length
          • Key length
          • Key
          • Value
      • A sync-marker every few 100 kilobytes or so.

    Record-Compressed SequenceFile Format
      • Header
      • Record
          • Record length
          • Key length
          • Key
          • Compressed Value
      • A sync-marker every few 100 kilobytes or so.

    Block-Compressed SequenceFile Format
      • Header
      • Record Block
          • Uncompressed number of records in the block
          • Compressed key-lengths block-size
          • Compressed key-lengths block
          • Compressed keys block-size
          • Compressed keys block
          • Compressed value-lengths block-size
          • Compressed value-lengths block
          • Compressed values block-size
          • Compressed values block
      • A sync-marker every block.
    + +

    The compressed blocks of key lengths and value lengths consist of the + actual lengths of individual keys/values encoded in ZeroCompressedInteger + format.

    + + @see CompressionCodec]]> +
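+ A minimal writer/reader sketch using the createWriter options described
+ above (the path and the key/value types are illustrative):
+
+     Configuration conf = new Configuration();
+     Path file = new Path("/data/example.seq");
+
+     try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
+         SequenceFile.Writer.file(file),
+         SequenceFile.Writer.keyClass(Text.class),
+         SequenceFile.Writer.valueClass(IntWritable.class),
+         SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK))) {
+       writer.append(new Text("alpha"), new IntWritable(1));
+     }
+
+     try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
+         SequenceFile.Reader.file(file))) {
+       Text key = new Text();
+       IntWritable value = new IntWritable();
+       while (reader.next(key, value)) {
+         System.out.println(key + " -> " + value);
+       }
+     }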
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a ShortWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the class of the objects to stringify]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + position. Note that this + method avoids using the converter or doing String instantiation + @return the Unicode scalar value at position or -1 + if the position is invalid or points to a + trailing byte]]> + + + + + + + + + + what in the backing + buffer, starting as position start. The starting + position is measured in bytes and the return value is in + terms of byte position in the buffer. The backing buffer is + not converted to a string for this operation. + @return byte position of the first occurrence of the search + string in the UTF-8 buffer or -1 if not found]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Note: For performance reasons, this call does not clear the + underlying byte array that is retrievable via {@link #getBytes()}. + In order to free the byte-array memory, call {@link #set(byte[])} + with an empty byte array (For example, new byte[0]).]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a Text with the same contents.]]> + + + + + + + + + + + + + + + + + + + + + + + + + replace is true, then + malformed input is replaced with the + substitution character, which is U+FFFD. Otherwise the + method throws a MalformedInputException.]]> + + + + + + + + + + + + + + + replace is true, then + malformed input is replaced with the + substitution character, which is U+FFFD. Otherwise the + method throws a MalformedInputException. + @return ByteBuffer: bytes stores at ByteBuffer.array() + and length is ByteBuffer.limit()]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + In + addition, it provides methods for string traversal without converting the + byte array to a string.

    Also includes utilities for + serializing/deserialing a string, coding/decoding a string, checking if a + byte array contains valid UTF8 code, calculating the length of an encoded + string.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This is useful when a class may evolve, so that instances written by the + old version of the class may still be processed by the new version. To + handle this situation, {@link #readFields(DataInput)} + implementations should catch {@link VersionMismatchException}.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a VIntWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + o is a VLongWritable with the same value.]]> + + + + + + + + + + + + + + + + + + + + + + + + out. + + @param out DataOuput to serialize this object into. + @throws IOException]]> + + + + + + + in. + +

    For efficiency, implementations should attempt to re-use storage in the + existing object where possible.

+ @param in DataInput to deserialize this object from.
+ @throws IOException]]>
    + + + Any key or value type in the Hadoop Map-Reduce + framework implements this interface.

    + +

    Implementations typically implement a static read(DataInput) + method which constructs a new instance, calls {@link #readFields(DataInput)} + and returns the instance.

    + +

    Example:

    +

    +     public class MyWritable implements Writable {
    +       // Some data
    +       private int counter;
    +       private long timestamp;
    +
    +       // Default constructor to allow (de)serialization
    +       MyWritable() { }
    +
    +       public void write(DataOutput out) throws IOException {
    +         out.writeInt(counter);
    +         out.writeLong(timestamp);
    +       }
    +
    +       public void readFields(DataInput in) throws IOException {
    +         counter = in.readInt();
    +         timestamp = in.readLong();
    +       }
    +
    +       public static MyWritable read(DataInput in) throws IOException {
    +         MyWritable w = new MyWritable();
    +         w.readFields(in);
    +         return w;
    +       }
    +     }
    + 

    ]]> +
    + + + + + + + + WritableComparables can be compared to each other, typically + via Comparators. Any type which is to be used as a + key in the Hadoop Map-Reduce framework should implement this + interface.

    + +

    Note that hashCode() is frequently used in Hadoop to partition + keys. It's important that your implementation of hashCode() returns the same + result across different instances of the JVM. Note also that the default + hashCode() implementation in Object does not + satisfy this property.

    + +

    Example:

    +

    +     public class MyWritableComparable implements WritableComparable {
    +       // Some data
    +       private int counter;
    +       private long timestamp;
    +       
    +       public void write(DataOutput out) throws IOException {
    +         out.writeInt(counter);
    +         out.writeLong(timestamp);
    +       }
    +       
    +       public void readFields(DataInput in) throws IOException {
    +         counter = in.readInt();
    +         timestamp = in.readLong();
    +       }
    +       
    +       public int compareTo(MyWritableComparable o) {
+         int thisValue = this.counter;
+         int thatValue = o.counter;
    +         return (thisValue < thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
    +       }
    +
    +       public int hashCode() {
    +         final int prime = 31;
    +         int result = 1;
    +         result = prime * result + counter;
    +         result = prime * result + (int) (timestamp ^ (timestamp >>> 32));
+         return result;
    +       }
    +     }
    + 

    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The default implementation reads the data into two {@link + WritableComparable}s (using {@link + Writable#readFields(DataInput)}, then calls {@link + #compare(WritableComparable,WritableComparable)}.]]> + + + + + + + The default implementation uses the natural ordering, calling {@link + Comparable#compareTo(Object)}.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This base implementation uses the natural ordering. To define alternate + orderings, override {@link #compare(WritableComparable,WritableComparable)}. + +

    One may optimize compare-intensive operations by overriding + {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are + provided to assist in optimized implementations of this method.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Enum type + @param in DataInput to read from + @param enumType Class type of Enum + @return Enum represented by String read from DataInput + @throws IOException]]> + + + + + + + + + + + + + + + + len number of bytes in input streamin + @param in input stream + @param len number of bytes to skip + @throws IOException when skipped less number of bytes]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CompressionCodec for which to get the + Compressor + @param conf the Configuration object which contains confs for creating or reinit the compressor + @return Compressor for the given + CompressionCodec from the pool or a new one]]> + + + + + + + + + CompressionCodec for which to get the + Decompressor + @return Decompressor for the given + CompressionCodec the pool or a new one]]> + + + + + + Compressor to be returned to the pool]]> + + + + + + Decompressor to be returned to the + pool]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Codec aliases are case insensitive. +

+ The codec alias is the short class name (without the package name).
+ If the short class name ends with 'Codec', then there are two aliases for
+ the codec: the complete short class name and the short class name without
+ the 'Codec' ending. For example, for the 'GzipCodec' codec class name the
+ aliases are 'gzip' and 'gzipcodec'.
+
+ @param codecName the canonical class name of the codec
+ @return the codec object]]>
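+ For instance (assuming the gzip codec is available, as it is in a default
+ build):
+
+     CompressionCodecFactory factory =
+         new CompressionCodecFactory(new Configuration());
+     // Both aliases resolve to org.apache.hadoop.io.compress.GzipCodec.
+     CompressionCodec byAlias     = factory.getCodecByName("gzip");
+     CompressionCodec byShortName = factory.getCodecByName("gzipcodec");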

    + The code alias is the short class name (without the package name). + If the short class name ends with 'Codec', then there are two aliases for + the codec, the complete short class name and the short class name without + the 'Codec' ending. For example for the 'GzipCodec' codec class name the + alias are 'gzip' and 'gzipcodec'. + + @param codecName the canonical class name of the codec + @return the codec class]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Implementations are assumed to be buffered. This permits clients to + reposition the underlying input stream then call {@link #resetState()}, + without having to also synchronize client buffers.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true indicating that more input data is required. + + @param b Input data + @param off Start offset + @param len Length]]> + + + + + true if the input data buffer is empty and + #setInput() should be called in order to provide more input.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the end of the compressed + data output stream has been reached.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true indicating that more input data is required. + (Both native and non-native versions of various Decompressors require + that the data passed in via b[] remain unmodified until + the caller is explicitly notified--via {@link #needsInput()}--that the + buffer may be safely modified. With this requirement, an extra + buffer-copy can be avoided.) + + @param b Input data + @param off Start offset + @param len Length]]> + + + + + true if the input data buffer is empty and + {@link #setInput(byte[], int, int)} should be called to + provide more input. + + @return true if the input data buffer is empty and + {@link #setInput(byte[], int, int)} should be called in + order to provide more input.]]> + + + + + + + + + + + + + true if a preset dictionary is needed for decompression. + @return true if a preset dictionary is needed for decompression]]> + + + + + true if the end of the decompressed + data output stream has been reached. Indicates a concatenated data stream + when finished() returns true and {@link #getRemaining()} + returns a positive value. finished() will be reset with the + {@link #reset()} method. + @return true if the end of the decompressed + data output stream has been reached.]]> + + + + + + + + + + + + + + true and getRemaining() returns a positive value. If + {@link #finished()} returns true and getRemaining() returns + a zero value, indicates that the end of data stream has been reached and + is not a concatenated data stream. + @return The number of bytes remaining in the compressed data buffer.]]> + + + + + true and {@link #getRemaining()} returns a positive value, + reset() is called before processing of the next data stream in the + concatenated data stream. 
{@link #finished()} will be reset and will + return false when reset() is called.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
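The setInput()/needsInput()/finished() contract described above can be exercised directly. The sketch below is an illustrative round trip using DefaultCodec's Compressor and Decompressor; it assumes the input fits in a single setInput() call, so needsInput() is not consulted:

    // Sketch of the raw Compressor/Decompressor protocol: provide input via
    // setInput(), call finish(), then drain output until finished() is true.
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.Decompressor;
    import org.apache.hadoop.io.compress.DefaultCodec;

    public class RawCodecProtocol {
      public static void main(String[] args) throws IOException {
        DefaultCodec codec = new DefaultCodec();
        codec.setConf(new Configuration());
        Compressor compressor = codec.createCompressor();
        Decompressor decompressor = codec.createDecompressor();

        byte[] raw = "hello hello hello".getBytes(StandardCharsets.UTF_8);
        byte[] buf = new byte[64];

        // Compress: larger inputs would check needsInput() before each setInput().
        compressor.setInput(raw, 0, raw.length);
        compressor.finish();
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        while (!compressor.finished()) {
          compressed.write(buf, 0, compressor.compress(buf, 0, buf.length));
        }

        // Decompress the complete stream back.
        byte[] data = compressed.toByteArray();
        decompressor.setInput(data, 0, data.length);
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        while (!decompressor.finished()) {
          restored.write(buf, 0, decompressor.decompress(buf, 0, buf.length));
        }
        System.out.println(restored.toString(StandardCharsets.UTF_8.name()));
      }
    }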

  • "none" - No compression. +
  • "lzo" - LZO compression. +
  • "gz" - GZIP compression. + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
  • Block Compression. +
  • Named meta data blocks. +
  • Sorted or unsorted keys. +
  • Seek by key or by file offset. + + The memory footprint of a TFile includes the following: +
      +
    • Some constant overhead of reading or writing a compressed block. +
        +
      • Each compressed block requires one compression/decompression codec for + I/O. +
      • Temporary space to buffer the key. +
      • Temporary space to buffer the value (for TFile.Writer only). Values are + chunk encoded, so that we buffer at most one chunk of user data. By default, + the chunk buffer is 1MB. Reading chunked value does not require additional + memory. +
      +
    • TFile index, which is proportional to the total number of Data Blocks. + The total amount of memory needed to hold the index can be estimated as + (56+AvgKeySize)*NumBlocks. +
    • MetaBlock index, which is proportional to the total number of Meta + Blocks. The total amount of memory needed to hold the index for Meta Blocks + can be estimated as (40+AvgMetaBlockName)*NumMetaBlock. +
    +

    + The behavior of TFile can be customized by the following variables through + Configuration: +

      +
    • tfile.io.chunk.size: Value chunk size. Integer (in bytes). Defaults + to 1MB. Values shorter than the chunk size are guaranteed to have a + known value length at read time (See + {@link TFile.Reader.Scanner.Entry#isValueLengthKnown()}). +
    • tfile.fs.output.buffer.size: Buffer size used for + FSDataOutputStream. Integer (in bytes). Defaults to 256KB. +
    • tfile.fs.input.buffer.size: Buffer size used for + FSDataInputStream. Integer (in bytes). Defaults to 256KB (see the + configuration sketch after this list). +
    +
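A hypothetical configuration sketch tying the variables above to a TFile.Writer (the writer signature and the TFile.COMPRESSION_GZ / "memcmp" arguments are assumptions based on org.apache.hadoop.io.file.tfile.TFile; the output path is illustrative):

    // Sketch: tuning the TFile knobs above and writing a small sorted TFile.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.file.tfile.TFile;

    public class TFileWriteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt("tfile.io.chunk.size", 1024 * 1024);        // value chunk size
        conf.setInt("tfile.fs.output.buffer.size", 256 * 1024); // FSDataOutputStream buffer
        conf.setInt("tfile.fs.input.buffer.size", 256 * 1024);  // FSDataInputStream buffer

        FileSystem fs = FileSystem.get(conf);
        try (FSDataOutputStream out = fs.create(new Path("/tmp/example.tfile"))) {
          // 256KB minimum block size, gzip compression, memcmp key ordering.
          TFile.Writer writer =
              new TFile.Writer(out, 256 * 1024, TFile.COMPRESSION_GZ, "memcmp", conf);
          writer.append("key1".getBytes(StandardCharsets.UTF_8),
                        "value1".getBytes(StandardCharsets.UTF_8));
          writer.close();
        }
      }
    }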

    + Suggestions on performance optimization. +

      +
    • Minimum block size. We recommend a minimum block size setting between + 256KB and 1MB for general usage. A larger block size is preferred if files are + primarily for sequential access. However, it would lead to inefficient random + access (because there is more data to decompress). Smaller blocks are good + for random access, but require more memory to hold the block index, and may + be slower to create (because we must flush the compressor stream at the + conclusion of each data block, which leads to an FS I/O flush). Further, due + to the internal caching in the compression codec, the smallest possible block + size would be around 20KB-30KB. +
    • The current implementation does not offer true multi-threading for + reading. The implementation uses FSDataInputStream seek()+read(), which is + shown to be much faster than positioned-read call in single thread mode. + However, it also means that if multiple threads attempt to access the same + TFile (using multiple scanners) simultaneously, the actual I/O is carried out + sequentially even if they access different DFS blocks. +
    • Compression codec. Use "none" if the data is not very compressible (by + compressible, I mean a compression ratio of at least 2:1). Generally, use "lzo" + as the starting point for experimenting. "gz" offers a slightly better + compression ratio than "lzo" but requires 4x CPU to compress and 2x CPU to + decompress, compared to "lzo". +
    • File system buffering. If the underlying FSDataInputStream and + FSDataOutputStream are already adequately buffered, or if applications + read/write keys and values in large buffers, we can reduce the sizes of + input/output buffering in the TFile layer by setting the configuration parameters + "tfile.fs.input.buffer.size" and "tfile.fs.output.buffer.size". +
    + + Some design rationale behind TFile can be found at Hadoop-3315.]]> + + + + + + + + + + + Utils#writeVLong(out, n). + + @param out + output stream + @param n + The integer to be encoded + @throws IOException + @see Utils#writeVLong(DataOutput, long)]]> + + + + + + + + +
  • if n in [-32, 127): encode in one byte with the actual value. + Otherwise, +
  • if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52; + byte[1]=n&0xff. Otherwise, +
  • if n in [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 - + 88; byte[1]=(n>>8)&0xff; byte[2]=n&0xff. Otherwise, +
  • if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112; + byte[1] = (n>>16)&0xff; byte[2] = (n>>8)&0xff; byte[3]=n&0xff. Otherwise: +
  • if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] = + (n>>24)&0xff; byte[2]=(n>>16)&0xff; byte[3]=(n>>8)&0xff; byte[4]=n&0xff; +
  • if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] = + (n>>32)&0xff; byte[2]=(n>>24)&0xff; byte[3]=(n>>16)&0xff; + byte[4]=(n>>8)&0xff; byte[5]=n&0xff +
  • if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] = + (n>>40)&0xff; byte[2]=(n>>32)&0xff; byte[3]=(n>>24)&0xff; + byte[4]=(n>>16)&0xff; byte[5]=(n>>8)&0xff; byte[6]=n&0xff; +
  • if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] = + (n>>48)&0xff; byte[2] = (n>>40)&0xff; byte[3]=(n>>32)&0xff; + byte[4]=(n>>24)&0xff; byte[5]=(n>>16)&0xff; byte[6]=(n>>8)&0xff; + byte[7]=n&0xff; +
  • if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] = + (n>>56)&0xff; byte[2] = (n>>48)&0xff; byte[3] = (n>>40)&0xff; + byte[4]=(n>>32)&0xff; byte[5]=(n>>24)&0xff; byte[6]=(n>>16)&0xff; + byte[7]=(n>>8)&0xff; byte[8]=n&0xff; + + + @param out + output stream + @param n + the integer number + @throws IOException]]> + + + + + + + (int)Utils#readVLong(in). + + @param in + input stream + @return the decoded integer + @throws IOException + + @see Utils#readVLong(DataInput)]]> + + + + + + + +
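The encoding above can be checked with a quick round trip, assuming the public static helpers in org.apache.hadoop.io.file.tfile.Utils:

    // Sketch: round-tripping a long through the variable-length encoding.
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.file.tfile.Utils;

    public class VLongRoundTrip {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        Utils.writeVLong(out, 300L);  // falls in the two-byte range [-20*2^8, 20*2^8)
        out.flush();
        System.out.println("encoded length = " + bytes.size());  // expected: 2

        DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println("decoded value  = " + Utils.readVLong(in)); // 300
      }
    }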
  • if (FB >= -32), return (long)FB; +
  • if (FB in [-72, -33]), return (FB+52)<<8 + NB[0]&0xff; +
  • if (FB in [-104, -73]), return (FB+88)<<16 + (NB[0]&0xff)<<8 + + NB[1]&0xff; +
  • if (FB in [-120, -105]), return (FB+112)<<24 + (NB[0]&0xff)<<16 + + (NB[1]&0xff)<<8 + NB[2]&0xff; +
  • if (FB in [-128, -121]), return interpret NB[FB+129] as a signed + big-endian integer. + + @param in + input stream + @return the decoded long integer. + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + Type of the input key. + @param list + The list + @param key + The input key. + @param cmp + Comparator for the key. + @return The index to the desired element if it exists; or list.size() + otherwise.]]> + + + + + + + + + Type of the input key. + @param list + The list + @param key + The input key. + @param cmp + Comparator for the key. + @return The index to the desired element if it exists; or list.size() + otherwise.]]> + + + + + + + + Type of the input key. + @param list + The list + @param key + The input key. + @return The index to the desired element if it exists; or list.size() + otherwise.]]> + + + + + + + + Type of the input key. + @param list + The list + @param key + The input key. + @return The index to the desired element if it exists; or list.size() + otherwise.]]> + + + + + + + + + + + + + + + + + An experimental {@link Serialization} for Java {@link Serializable} classes. +

    + @see JavaSerializationComparator]]> +
    +
    + + + + + + + + + A {@link RawComparator} that uses a {@link JavaSerialization} + {@link Deserializer} to deserialize objects that are then compared via + their {@link Comparable} interfaces. +

    + @param + @see JavaSerialization]]> +
    +
    + + + + + + + + + + + + + +This package provides a mechanism for using different serialization frameworks +in Hadoop. The property "io.serializations" defines a list of +{@link org.apache.hadoop.io.serializer.Serialization}s that know how to create +{@link org.apache.hadoop.io.serializer.Serializer}s and +{@link org.apache.hadoop.io.serializer.Deserializer}s. +

    + +

    +To add a new serialization framework write an implementation of +{@link org.apache.hadoop.io.serializer.Serialization} and add its name to the +"io.serializations" property. +

    ]]> +
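A small sketch of wiring the property described above, assuming the SerializationFactory and JavaSerialization classes in org.apache.hadoop.io.serializer:

    // Sketch: registering JavaSerialization and obtaining a Serializer for a
    // Serializable class (String) through the factory.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.serializer.JavaSerialization;
    import org.apache.hadoop.io.serializer.SerializationFactory;
    import org.apache.hadoop.io.serializer.Serializer;
    import org.apache.hadoop.io.serializer.WritableSerialization;

    public class SerializationConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "io.serializations" is a comma-separated list of Serialization classes.
        conf.setStrings("io.serializations",
            WritableSerialization.class.getName(),
            JavaSerialization.class.getName());
        SerializationFactory factory = new SerializationFactory(conf);
        Serializer<String> serializer = factory.getSerializer(String.class);
        System.out.println(serializer.getClass().getName());
      }
    }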
    +
    + + + + + + + + + + + + + + + + + + + avro.reflect.pkgs or implement + {@link AvroReflectSerializable} interface.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + +This package provides Avro serialization in Hadoop. This can be used to +serialize/deserialize Avro types in Hadoop. +

    + +

    +Use {@link org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization} for +serialization of classes generated by Avro's 'specific' compiler. +

    + +

    +Use {@link org.apache.hadoop.io.serializer.avro.AvroReflectSerialization} for +other classes. +{@link org.apache.hadoop.io.serializer.avro.AvroReflectSerialization} works for +any class which is either in the package list configured via +{@link org.apache.hadoop.io.serializer.avro.AvroReflectSerialization#AVRO_REFLECT_PACKAGES} +or implements the {@link org.apache.hadoop.io.serializer.avro.AvroReflectSerializable} +interface. +

    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Implementations of this interface consume the {@link MetricsRecord} generated + from {@link MetricsSource}. It registers with {@link MetricsSystem} which + periodically pushes the {@link MetricsRecord} to the sink using + {@link #putMetrics(MetricsRecord)} method. If the implementing class also + implements {@link Closeable}, then the MetricsSystem will close the sink when + it is stopped.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the actual type of the source object + @param source object to register + @return the source object + @exception MetricsException]]> + + + + + + + + the actual type of the source object + @param source object to register + @param name of the source. Must be unique or null (then extracted from + the annotations of the source object.) + @param desc the description of the source (or null. See above.) + @return the source object + @exception MetricsException]]> + + + + + + + + + + + + + + + + + + + + +
  • {@link MetricsSource} generates and updates metrics information.
  • {@link MetricsSink} consumes the metrics information.
 + + + {@link MetricsSource} and {@link MetricsSink} register with the metrics + system. Implementations of {@link MetricsSystem} poll the + {@link MetricsSource}s periodically and pass the {@link MetricsRecord}s to + {@link MetricsSink}, as sketched below.]]> +
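A minimal sketch of the source-side registration, assuming the DefaultMetricsSystem and Interns helpers; sinks are configured separately in hadoop-metrics2.properties:

    // Sketch: registering a MetricsSource with the default MetricsSystem; any
    // configured MetricsSink will periodically receive the produced records.
    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.Interns;

    public class MetricsRegistration {
      public static void main(String[] args) {
        MetricsSystem ms = DefaultMetricsSystem.initialize("example");
        ms.register("DemoSource", "A demo metrics source", new MetricsSource() {
          private long polls;
          @Override
          public void getMetrics(MetricsCollector collector, boolean all) {
            collector.addRecord("DemoRecord")
                .addGauge(Interns.info("Polls", "Times this source was polled"), ++polls);
          }
        });
      }
    }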
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + (aggregate). + Filter out entries that don't have at least minSamples. + + @return a map of peer DataNode Id to the average latency to that + node seen over the measurement period.]]> + + + + + This class maintains a group of rolling average metrics. It implements the + algorithm of rolling average, i.e. a number of sliding windows are kept to + roll over and evict old subsets of samples. Each window has a subset of + samples in a stream, where sub-sum and sub-total are collected. All sub-sums + and sub-totals in all windows will be aggregated to final-sum and final-total + used to compute final average, which is called rolling average. +

    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This class is a metrics sink that uses + {@link org.apache.hadoop.fs.FileSystem} to write the metrics logs. Every + roll interval a new directory will be created under the path specified by the + basepath property. All metrics will be logged to a file in the + current interval's directory in a file named <hostname>.log, where + <hostname> is the name of the host on which the metrics logging + process is running. The base path is set by the + <prefix>.sink.<instance>.basepath property. The + time zone used to create the current interval's directory name is GMT. If + the basepath property isn't specified, it will default to + "/tmp", which is the temp directory on whatever default file + system is configured for the cluster.

    + +

    The <prefix>.sink.<instance>.ignore-error + property controls whether an exception is thrown when an error is encountered + writing a log file. The default value is true. When set to + false, file errors are quietly swallowed.

    + +

    The roll-interval property sets the amount of time before + rolling the directory. The default value is 1 hour. The roll interval may + not be less than 1 minute. The property's value should be given as + number unit, where number is an integer value, and + unit is a valid unit. Valid units are minute, hour, + and day. The units are case insensitive and may be abbreviated or + plural. If no units are specified, hours are assumed. For example, + "2", "2h", "2 hour", and + "2 hours" are all valid ways to specify two hours.

    + +

    The roll-offset-interval-millis property sets the upper + bound on a random time interval (in milliseconds) that is used to delay + before the initial roll. All subsequent rolls will happen an integer + number of roll intervals after the initial roll, hence retaining the original + offset. The purpose of this property is to insert some variance in the roll + times so that large clusters using this sink on every node don't cause a + performance impact on HDFS by rolling simultaneously. The default value is + 30000 (30s). When writing to HDFS, as a rule of thumb, the roll offset in + millis should be no less than the number of sink instances times 5. + +

    The primary use of this class is for logging to HDFS. As it uses + {@link org.apache.hadoop.fs.FileSystem} to access the target file system, + however, it can be used to write to the local file system, Amazon S3, or any + other supported file system. The base path for the sink will determine the + file system used. An unqualified path will write to the default file system + set by the configuration.

    + +

    Not all file systems support the ability to append to files. In file + systems without the ability to append to files, only one writer can write to + a file at a time. To allow for concurrent writes from multiple daemons on a + single host, the source property is used to set unique headers + for the log files. The property should be set to the name of + the source daemon, e.g. namenode. The value of the + source property should typically be the same as the property's + prefix. If this property is not set, the source is taken to be + unknown.

    + +

    Instead of appending to an existing file, by default the sink + will create a new file with a suffix of ".<n>", where + n is the next lowest integer that isn't already used in a file name, + similar to the Hadoop daemon logs. NOTE: the file with the highest + sequence number is the newest file, unlike the Hadoop daemon logs.

    + +

    For file systems that allow append, the sink supports appending to the + existing file instead. If the allow-append property is set to + true, the sink will instead append to the existing file on file systems that + support appends. By default, the allow-append property is + false.

    + +

    Note that when writing to HDFS with allow-append set to true, + there is a minimum acceptable number of data nodes. If the number of data + nodes drops below that minimum, the append will succeed, but reading the + data will fail with an IOException in the DataStreamer class. The minimum + number of data nodes required for a successful append is generally 2 or + 3.

    + +

    Note also that when writing to HDFS, the file size information is not + updated until the file is closed (at the end of the interval) even though + the data is being written successfully. This is a known HDFS limitation that + exists because of the performance cost of updating the metadata. See + HDFS-5478.

    + +

    When using this sink in a secure (Kerberos) environment, two additional + properties must be set: keytab-key and + principal-key. keytab-key should contain the key by + which the keytab file can be found in the configuration, for example, + yarn.nodemanager.keytab. principal-key should + contain the key by which the principal can be found in the configuration, + for example, yarn.nodemanager.principal.]]> + + + + + + + + + + + + + + + + + + + + + + + + + CollectD StatsD plugin). +
    + To configure this plugin, you will need to add the following + entries to your hadoop-metrics2.properties file: +
    +

    + *.sink.statsd.class=org.apache.hadoop.metrics2.sink.StatsDSink
    + [prefix].sink.statsd.server.host=
    + [prefix].sink.statsd.server.port=
    + [prefix].sink.statsd.skip.hostname=true|false (optional)
    + [prefix].sink.statsd.service.name=NameNode (name you want for service)
    + 
    ]]> +
    +
    + +
    + + + + + + + + + + + + + ,name=" + Where the and are the supplied parameters. + + @param serviceName + @param nameName + @param theMbean - the MBean to register + @return the named used to register the MBean]]> + + + + + + + + + ,name=" + Where the and are the supplied parameters. + + @param serviceName + @param nameName + @param properties - Key value pairs to define additional JMX ObjectName + properties. + @param theMbean - the MBean to register + @return the named used to register the MBean]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + hostname or hostname:port. If + the specs string is null, defaults to localhost:defaultPort. + + @param specs server specs (see description) + @param defaultPort the default port if not specified + @return a list of InetSocketAddress objects.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This method is used when parts of Hadoop need know whether to apply + single rack vs multi-rack policies, such as during block placement. + Such algorithms behave differently if they are on multi-switch systems. +

    + + @return true if the mapping thinks that it is on a single switch]]> +
    +
    + + + + + + + + + + + + + + + + + This predicate simply assumes that all mappings not derived from + this class are multi-switch. + @param mapping the mapping to query + @return true if the base class says it is single switch, or the mapping + is not derived from this class.]]> + + + + It is not mandatory to + derive {@link DNSToSwitchMapping} implementations from it, but it is strongly + recommended, as it makes it easy for the Hadoop developers to add new methods + to this base class that are automatically picked up by all implementations. +

    + + This class does not extend the Configured + base class, and should not be changed to do so, as it causes problems + for subclasses. The constructor of the Configured calls + the {@link #setConf(Configuration)} method, which will call into the + subclasses before they have been fully constructed.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + If a name cannot be resolved to a rack, the implementation + should return {@link NetworkTopology#DEFAULT_RACK}. This + is what the bundled implementations do, though it is not a formal requirement + + @param names the list of hosts to resolve (can be empty) + @return list of resolved network paths. + If names is empty, the returned list is also empty]]> + + + + + + + + + + + + + + + + + + + + + + + + Calling {@link #setConf(Configuration)} will trigger a + re-evaluation of the configuration settings and so be used to + set up the mapping script.]]> + + + + + + + + + + + + + + + + + + + + + This will get called in the superclass constructor, so a check is needed + to ensure that the raw mapping is defined before trying to relaying a null + configuration. + @param conf]]> + + + + + + + + + + It contains a static class RawScriptBasedMapping that performs + the work: reading the configuration parameters, executing any defined + script, handling errors and such like. The outer + class extends {@link CachedDNSToSwitchMapping} to cache the delegated + queries. +

    + This DNS mapper's {@link #isSingleSwitch()} predicate returns + true if and only if a script is defined.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Simple {@link DNSToSwitchMapping} implementation that reads a 2 column text + file. The columns are separated by whitespace. The first column is a DNS or + IP address and the second column specifies the rack where the address maps. +

    +

    + This class uses the configuration parameter {@code + net.topology.table.file.name} to locate the mapping file. +

    +

    + Calls to {@link #resolve(List)} will look up the address as defined in the + mapping file. If no entry corresponding to the address is found, the value + {@code /default-rack} is returned. +

    ]]> +
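An illustrative lookup using TableMapping, under the assumption that it exposes setConf()/resolve() as described; the mapping file path is hypothetical:

    // Sketch: resolving racks from a two-column "host rack" mapping file.
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.TableMapping;

    public class RackLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical path to the whitespace-separated mapping file.
        conf.set("net.topology.table.file.name", "/etc/hadoop/topology.table");
        TableMapping mapping = new TableMapping();
        mapping.setConf(conf);
        List<String> racks =
            mapping.resolve(Arrays.asList("host1.example.com", "unknown-host"));
        System.out.println(racks); // unknown hosts resolve to /default-rack
      }
    }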
    +
    + + + + +
    + + + + + + + + + + + + + + + + + + (cause==null ? null : cause.toString()) (which + typically contains the class and detail message of cause). + @param cause the cause (which is saved for later retrieval by the + {@link #getCause()} method). (A null value is + permitted, and indicates that the cause is nonexistent or + unknown.)]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mapping + and mapping]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /host@realm. + @param principalName principal name of format as described above + @return host name if the the string conforms to the above format, else null]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "jack" + + @param userName + @return userName without login method]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the return type of the run method + @param action the method to execute + @return the value from the run method]]> + + + + + + + + the return type of the run method + @param action the method to execute + @return the value from the run method + @throws IOException if the action throws an IOException + @throws Error if the action throws an Error + @throws RuntimeException if the action throws a RuntimeException + @throws InterruptedException if the action throws an InterruptedException + @throws UndeclaredThrowableException if the action throws something else]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CredentialProvider implementations must be thread safe.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + (cause==null ? null : cause.toString()) (which + typically contains the class and detail message of cause). + @param cause the cause (which is saved for later retrieval by the + {@link #getCause()} method). 
(A null value is + permitted, and indicates that the cause is nonexistent or + unknown.)]]> + + + + + + + + + + + + + + does not provide the stack trace for security purposes.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A User-Agent String is considered to be a browser if it matches + any of the regex patterns from browser-useragent-regex; the default + behavior is to consider everything a browser that matches the following: + "^Mozilla.*,^Opera.*". Subclasses can optionally override + this method to use different behavior. + + @param userAgent The User-Agent String, or null if there isn't one + @return true if the User-Agent String refers to a browser, false if not]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The type of the token identifier]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + T extends TokenIdentifier]]> + + + + + + + + + + DelegationTokenAuthenticatedURL. +

    + An instance of the default {@link DelegationTokenAuthenticator} will be + used.]]> + + + + + DelegationTokenAuthenticatedURL. + + @param authenticator the {@link DelegationTokenAuthenticator} instance to + use, if null the default one will be used.]]> + + + + + DelegationTokenAuthenticatedURL using the default + {@link DelegationTokenAuthenticator} class. + + @param connConfigurator a connection configurator.]]> + + + + + DelegationTokenAuthenticatedURL. + + @param authenticator the {@link DelegationTokenAuthenticator} instance to + use, if null the default one will be used. + @param connConfigurator a connection configurator.]]> + + + + + + + + + + + + The default class is {@link KerberosDelegationTokenAuthenticator} + + @return the delegation token authenticator class to use as default.]]> + + + + + + + This method is provided to enable WebHDFS backwards compatibility. + + @param useQueryString TRUE if the token is transmitted in the + URL query string, FALSE if the delegation token is transmitted + using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP + header.]]> + + + + + TRUE if the token is transmitted in the URL query + string, FALSE if the delegation token is transmitted using the + {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.]]> + + + + + + + + + + + + + + + + + + Authenticator. + + @param url the URL to connect to. Only HTTP/S URLs are supported. + @param token the authentication token being used for the user. + @return an authenticated {@link HttpURLConnection}. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + Authenticator. If the doAs parameter is not NULL, + the request will be done on behalf of the specified doAs user. + + @param url the URL to connect to. Only HTTP/S URLs are supported. + @param token the authentication token being used for the user. + @param doAs user to do the the request on behalf of, if NULL the request is + as self. + @return an authenticated {@link HttpURLConnection}. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + Authenticator + for authentication. + + @param url the URL to get the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token being used for the user where the + Delegation token will be stored. + @param renewer the renewer user. + @return a delegation token. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + + Authenticator + for authentication. + + @param url the URL to get the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token being used for the user where the + Delegation token will be stored. + @param renewer the renewer user. + @param doAsUser the user to do as, which will be the token owner. + @return a delegation token. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + Authenticator for authentication. + + @param url the URL to renew the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token with the Delegation Token to renew. + @throws IOException if an IO error occurred. 
+ @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + Authenticator for authentication. + + @param url the URL to renew the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token with the Delegation Token to renew. + @param doAsUser the user to do as, which will be the token owner. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + Authenticator. + + @param url the URL to cancel the delegation token from. Only HTTP/S URLs + are supported. + @param token the authentication token with the Delegation Token to cancel. + @throws IOException if an IO error occurred.]]> + + + + + + + + + Authenticator. + + @param url the URL to cancel the delegation token from. Only HTTP/S URLs + are supported. + @param token the authentication token with the Delegation Token to cancel. + @param doAsUser the user to do as, which will be the token owner. + @throws IOException if an IO error occurred.]]> + + + + DelegationTokenAuthenticatedURL is a + {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token + functionality. +

    + The authentication mechanisms supported by default are Hadoop Simple + authentication (also known as pseudo authentication) and Kerberos SPNEGO + authentication. +

    + Additional authentication mechanisms can be supported via {@link + DelegationTokenAuthenticator} implementations. +

    + The default {@link DelegationTokenAuthenticator} is the {@link + KerberosDelegationTokenAuthenticator} class which supports + automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via + the {@link PseudoDelegationTokenAuthenticator} class. +

    + AuthenticatedURL instances are not thread-safe.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Authenticator + for authentication. + + @param url the URL to get the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token being used for the user where the + Delegation token will be stored. + @param renewer the renewer user. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + + Authenticator + for authentication. + + @param url the URL to get the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token being used for the user where the + Delegation token will be stored. + @param renewer the renewer user. + @param doAsUser the user to do as, which will be the token owner. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + Authenticator for authentication. + + @param url the URL to renew the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token with the Delegation Token to renew. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + + + Authenticator for authentication. + + @param url the URL to renew the delegation token from. Only HTTP/S URLs are + supported. + @param token the authentication token with the Delegation Token to renew. + @param doAsUser the user to do as, which will be the token owner. + @throws IOException if an IO error occurred. + @throws AuthenticationException if an authentication exception occurred.]]> + + + + + + + + + Authenticator. + + @param url the URL to cancel the delegation token from. Only HTTP/S URLs + are supported. + @param token the authentication token with the Delegation Token to cancel. + @throws IOException if an IO error occurred.]]> + + + + + + + + + + Authenticator. + + @param url the URL to cancel the delegation token from. Only HTTP/S URLs + are supported. + @param token the authentication token with the Delegation Token to cancel. + @param doAsUser the user to do as, which will be the token owner. + @throws IOException if an IO error occurred.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + KerberosDelegationTokenAuthenticator provides support for + Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation + Token operations. +

    + It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP + endpoint does not trigger a SPNEGO authentication]]> + + + + + + + + + PseudoDelegationTokenAuthenticator provides support for + Hadoop's pseudo authentication mechanism that accepts + the user name specified as a query string parameter and support for Hadoop + Delegation Token operations. +

    + This mimics the model of Hadoop Simple authentication trusting the + {@link UserGroupInformation#getCurrentUser()} value.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + live. + @return a (snapshotted) map of blocker name->description values]]> + + + + + + + + + + + + + Do nothing if the service is null or not + in a state in which it can be/needs to be stopped. +

    + The service state is checked before the operation begins. + This process is not thread safe. + @param service a service or null]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
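A short sketch of defensive shutdown with ServiceOperations; the DemoService class is purely illustrative:

    // Sketch: stopping a service defensively, tolerating null or never-started services.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.service.AbstractService;
    import org.apache.hadoop.service.Service;
    import org.apache.hadoop.service.ServiceOperations;

    public class StopQuietlyExample {
      // A minimal no-op service used only for this example.
      static class DemoService extends AbstractService {
        DemoService() { super("demo"); }
      }

      public static void main(String[] args) {
        Service service = new DemoService();
        service.init(new Configuration());
        service.start();
        // Does nothing if the reference is null or the service cannot be stopped.
        ServiceOperations.stopQuietly(service);
      }
    }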

  • Any long-lived operation here will prevent the service state + change from completing in a timely manner.
  • If another thread is somehow invoked from the listener, and + that thread invokes the methods of the service (including + subclass-specific methods), there is a risk of a deadlock.
 + + + + @param service the service that has changed.]]> +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + The base implementation logs all arguments at the debug level, + then returns the passed in config unchanged.]]> + + + + + + + The action is to signal success by returning the exit code 0.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This method is called before {@link #init(Configuration)}; + Any non-null configuration that is returned from this operation + becomes the one that is passed on to that {@link #init(Configuration)} + operation. +

    + This permits implementations to change the configuration before + the init operation. As the ServiceLauncher only creates + an instance of the base {@link Configuration} class, it is + recommended to instantiate any subclass (such as YarnConfiguration) + that injects new resources. +

    + @param config the initial configuration built up by the + service launcher. + @param args list of arguments passed to the command line + after any launcher-specific commands have been stripped. + @return the configuration to init the service with. + Recommended: pass down the config parameter with any changes + @throws Exception any problem]]> + + + + + + + The return value becomes the exit code of the launched process. +

    + If an exception is raised, the policy is: +

      +
    1. Any subset of {@link org.apache.hadoop.util.ExitUtil.ExitException}: + the exception is passed up unmodified. +
    2. Any exception which implements + {@link org.apache.hadoop.util.ExitCodeProvider}: + A new {@link ServiceLaunchException} is created with the exit code + and message of the thrown exception; the thrown exception becomes the + cause.
    3. Any other exception: a new {@link ServiceLaunchException} is created + with the exit code {@link LauncherExitCodes#EXIT_EXCEPTION_THROWN} and + the message of the original exception (which becomes the cause).
    + @return the exit code + @throws org.apache.hadoop.util.ExitUtil.ExitException an exception passed + up as the exit code and error text. + @throws Exception any exception to report. If it provides an exit code + this is used in a wrapping exception.]]> +
    +
    + + + The command line options will be passed down before the + {@link Service#init(Configuration)} operation is invoked via an + invocation of {@link LaunchableService#bindArgs(Configuration, List)} + After the service has been successfully started via {@link Service#start()} + the {@link LaunchableService#execute()} method is called to execute the + service. When this method returns, the service launcher will exit, using + the return code from the method as its exit option.]]> + +
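A sketch of a LaunchableService implementation matching the workflow above (method signatures assumed from the LaunchableService interface; the class itself is hypothetical):

    // Sketch: bindArgs() receives the stripped CLI arguments before init();
    // execute() runs after start() and its return value becomes the exit code.
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.service.AbstractService;
    import org.apache.hadoop.service.launcher.LaunchableService;

    public class EchoArgsService extends AbstractService implements LaunchableService {
      private List<String> args;

      public EchoArgsService() { super("EchoArgsService"); }

      @Override
      public Configuration bindArgs(Configuration config, List<String> args)
          throws Exception {
        this.args = args;   // remember the stripped command line
        return config;      // recommended: pass the config straight through
      }

      @Override
      public int execute() throws Exception {
        args.forEach(System.out::println);
        return 0;           // becomes the process exit code
      }
    }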
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Approximate HTTP equivalent: {@code 400 Bad Request}]]> + + + + + + approximate HTTP equivalent: Approximate HTTP equivalent: {@code 401 Unauthorized}]]> + + + + + + + + + + + Approximate HTTP equivalent: Approximate HTTP equivalent: {@code 403: Forbidden}]]> + + + + + + Approximate HTTP equivalent: {@code 404: Not Found}]]> + + + + + + Approximate HTTP equivalent: {@code 405: Not allowed}]]> + + + + + + Approximate HTTP equivalent: {@code 406: Not Acceptable}]]> + + + + + + Approximate HTTP equivalent: {@code 408: Request Timeout}]]> + + + + + + Approximate HTTP equivalent: {@code 409: Conflict}]]> + + + + + + Approximate HTTP equivalent: {@code 500 Internal Server Error}]]> + + + + + + Approximate HTTP equivalent: {@code 501: Not Implemented}]]> + + + + + + Approximate HTTP equivalent: {@code 503 Service Unavailable}]]> + + + + + + If raised, this is expected to be raised server-side and likely due + to client/server version incompatibilities. +

    + Approximate HTTP equivalent: {@code 505: Version Not Supported}]]> + + + + + + + + + + + + + + + Codes with a YARN prefix are YARN-related. +

    + Many of the exit codes are designed to resemble HTTP error codes, + squashed into a single byte. e.g. 44, "not found" is the equivalent + of 404. The various 2XX HTTP error codes aren't followed; + the Unix standard of "0" for success is used. +

    +    0-10: general command issues
    +   30-39: equivalent to the 3XX responses, where those responses are
    +          considered errors by the application.
    +   40-49: client-side/CLI/config problems
    +   50-59: service-side problems.
    +   60+  : application specific error codes
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + This uses {@link String#format(String, Object...)} + to build the formatted exception in the ENGLISH locale. +

    + If the last argument is a throwable, it becomes the cause of the exception. + It will also be used as a parameter for the format. + @param exitCode exit code + @param format format for message to use in exception + @param args list of arguments]]> + + + + + When caught by the ServiceLauncher, it will convert that + into a process exit code. + + The {@link #ServiceLaunchException(int, String, Object...)} constructor + generates formatted exceptions.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Clients and/or applications can use the provided Progressable + to explicitly report progress to the Hadoop framework. This is especially + important for operations which take significant amount of time since, + in-lieu of the reported progress, the framework has to assume that an error + has occurred and time-out the operation.

    ]]> +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Class is to be obtained + @return the correctly typed Class of the given object.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + kill -0 command or equivalent]]> + + + + + + + + + + + + + + + + + + + ".cmd" on Windows, or ".sh" otherwise. + + @param parent File parent directory + @param basename String script file basename + @return File referencing the script in the directory]]> + + + + + + ".cmd" on Windows, or ".sh" otherwise. + + @param basename String script file basename + @return String script file name]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + IOException. + @return the path to {@link #WINUTILS_EXE} + @throws RuntimeException if the path is not resolvable]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Shell. + @return the thread that ran runCommand() that spawned this shell + or null if no thread is waiting for this shell to complete]]> + + + + + + + + + + + + Shell interface. + @param cmd shell command to execute. + @return the output of the executed command.]]> + + + + + + + + + Shell interface. + @param env the map of environment key=value + @param cmd shell command to execute. + @param timeout time in milliseconds after which script should be marked timeout + @return the output of the executed command. + @throws IOException on any problem.]]> + + + + + + + + Shell interface. + @param env the map of environment key=value + @param cmd shell command to execute. + @return the output of the executed command. + @throws IOException on any problem.]]> + + + + + Shell processes. + Iterates through a map of all currently running Shell + processes and destroys them one by one. This method is thread safe]]> + + + + + Shell objects.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CreateProcess synchronization object.]]> + + + + + os.name property.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Important: caller must check for this value being null. + The lack of such checks has led to many support issues being raised. +

    + @deprecated use one of the exception-raising getter methods, + specifically {@link #getWinUtilsPath()} or {@link #getWinUtilsFile()}]]> + + + + + + + + + + + + + + Shell can be used to run shell commands like du or + df. It also offers facilities to gate commands by + time-intervals.]]> + + + + + + + + ShutdownHookManager singleton. + + @return ShutdownHookManager singleton.]]> + + + + + + + Runnable + @param priority priority of the shutdownHook.]]> + + + + + + + + + Runnable + @param priority priority of the shutdownHook + @param timeout timeout of the shutdownHook + @param unit unit of the timeout TimeUnit]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ShutdownHookManager enables running shutdownHook + in a deterministic order, higher priority first. +

    + The JVM runs ShutdownHooks in a non-deterministic order or in parallel. + This class registers a single JVM shutdownHook and run all the + shutdownHooks registered to it (to this class) in order based on their + priority. + + Unless a hook was registered with a shutdown explicitly set through + {@link #addShutdownHook(Runnable, int, long, TimeUnit)}, + the shutdown time allocated to it is set by the configuration option + {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT} in + {@code core-site.xml}, with a default value of + {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT_DEFAULT} + seconds.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Tool, is the standard for any Map-Reduce tool/application. + The tool/application should delegate the handling of + + standard command-line options to {@link ToolRunner#run(Tool, String[])} + and only handle its custom arguments.

    + +

    Here is how a typical Tool is implemented:

    +

    +     public class MyApp extends Configured implements Tool {
    +     
    +       public int run(String[] args) throws Exception {
    +         // Configuration processed by ToolRunner
    +         Configuration conf = getConf();
    +         
    +         // Create a JobConf using the processed conf
    +         JobConf job = new JobConf(conf, MyApp.class);
    +         
    +         // Process custom command-line options
    +         Path in = new Path(args[1]);
    +         Path out = new Path(args[2]);
    +         
    +         // Specify various job-specific parameters     
    +         job.setJobName("my-app");
    +         job.setInputPath(in);
    +         job.setOutputPath(out);
    +         job.setMapperClass(MyMapper.class);
    +         job.setReducerClass(MyReducer.class);
    +
    +         // Submit the job, then poll for progress until the job is complete
    +         RunningJob runningJob = JobClient.runJob(job);
    +         if (runningJob.isSuccessful()) {
    +           return 0;
    +         } else {
    +           return 1;
    +         }
    +       }
    +       
    +       public static void main(String[] args) throws Exception {
    +         // Let ToolRunner handle generic command-line options 
    +         int res = ToolRunner.run(new Configuration(), new MyApp(), args);
    +         
    +         System.exit(res);
    +       }
    +     }
    + 

    + + @see GenericOptionsParser + @see ToolRunner]]> +
    + + + + + + + + + + + + + Tool by {@link Tool#run(String[])}, after + parsing with the given generic arguments. Uses the given + Configuration, or builds one if null. + + Sets the Tool's configuration with the possibly modified + version of the conf. + + @param conf Configuration for the Tool. + @param tool Tool to run. + @param args command-line arguments to the tool. + @return exit code of the {@link Tool#run(String[])} method.]]> + + + + + + + + Tool with its Configuration. + + Equivalent to run(tool.getConf(), tool, args). + + @param tool Tool to run. + @param args command-line arguments to the tool. + @return exit code of the {@link Tool#run(String[])} method.]]> + + + + + + + + + + + + + + + + + ToolRunner can be used to run classes implementing + Tool interface. It works in conjunction with + {@link GenericOptionsParser} to parse the + + generic hadoop command line arguments and modifies the + Configuration of the Tool. The + application-specific options are passed along without being modified. +

    + + @see Tool + @see GenericOptionsParser]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + this filter. + @param nbHash The number of hash function to consider. + @param hashType type of the hashing function (see + {@link org.apache.hadoop.util.hash.Hash}).]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Bloom filter, as defined by Bloom in 1970. +

    + The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by + the networking research community in the past decade thanks to the bandwidth efficiencies that it + offers for the transmission of set membership information between networked hosts. A sender encodes + the information into a bit vector, the Bloom filter, that is more compact than a conventional + representation. Computation and space costs for construction are linear in the number of elements. + The receiver uses the filter to test whether various elements are members of the set. Though the + filter will occasionally return a false positive, it will never return a false negative. When creating + the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size. + +
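A basic usage sketch of the add/membershipTest cycle described above, assuming org.apache.hadoop.util.bloom.BloomFilter and Key; the sizing values are illustrative only:

    // Sketch: false positives are possible, false negatives are not.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.util.bloom.BloomFilter;
    import org.apache.hadoop.util.bloom.Key;
    import org.apache.hadoop.util.hash.Hash;

    public class BloomFilterExample {
      public static void main(String[] args) {
        // vectorSize and nbHash are illustrative; size them from the expected
        // element count and target false-positive rate in real use.
        BloomFilter filter = new BloomFilter(1024, 5, Hash.MURMUR_HASH);
        filter.add(new Key("alice".getBytes(StandardCharsets.UTF_8)));

        System.out.println(filter.membershipTest(
            new Key("alice".getBytes(StandardCharsets.UTF_8))));   // true
        System.out.println(filter.membershipTest(
            new Key("mallory".getBytes(StandardCharsets.UTF_8)))); // false (with high probability)
      }
    }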

    + Originally created by + European Commission One-Lab Project 034819. + + @see Filter The general behavior of a filter + + @see Space/Time Trade-Offs in Hash Coding with Allowable Errors]]> + + + + + + + + + + + + + this filter. + @param nbHash The number of hash function to consider. + @param hashType type of the hashing function (see + {@link org.apache.hadoop.util.hash.Hash}).]]> + + + + + + + + + this counting Bloom filter. +

    + Invariant: nothing happens if the specified key does not belong to this counter Bloom filter. + @param key The key to remove.]]> + + + + + + + + + + + + key -> count map. +

    NOTE: due to the bucket size of this filter, inserting the same + key more than 15 times will cause an overflow at all filter positions + associated with this key, and it will significantly increase the error + rate for this and other keys. For this reason the filter can only be + used to store small count values 0 <= N << 15. + @param key key to be tested + @return 0 if the key is not present. Otherwise, a positive value v will + be returned such that v == count with probability equal to the + error rate of this filter, and v > count otherwise. + Additionally, if the filter experienced an underflow as a result of + {@link #delete(Key)} operation, the return value may be lower than the + count with the probability of the false negative rate of such + filter.]]> + + + + + + + + + + + + + + + + + + + + + + counting Bloom filter, as defined by Fan et al. in a ToN + 2000 paper. +
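The delete() and approximateCount() operations described above can be exercised as follows (a sketch; sizing values are illustrative):

    // Sketch: a counting Bloom filter tracks approximate insertion counts
    // and supports deletion, unlike the plain BloomFilter.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.util.bloom.CountingBloomFilter;
    import org.apache.hadoop.util.bloom.Key;
    import org.apache.hadoop.util.hash.Hash;

    public class CountingBloomExample {
      public static void main(String[] args) {
        CountingBloomFilter cbf = new CountingBloomFilter(1024, 5, Hash.MURMUR_HASH);
        Key k = new Key("alice".getBytes(StandardCharsets.UTF_8));
        cbf.add(k);
        cbf.add(k);
        System.out.println(cbf.approximateCount(k)); // likely 2 (counts are approximate)
        cbf.delete(k);
        System.out.println(cbf.membershipTest(k));   // still true: one insertion remains
      }
    }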

    + A counting Bloom filter is an improvement to a standard Bloom filter as it + allows dynamic additions and deletions of set membership information. This + is achieved through the use of a counting vector instead of a bit vector. +

    + Originally created by + European Commission One-Lab Project 034819. + + @see Filter The general behavior of a filter + + @see Summary cache: a scalable wide-area web cache sharing protocol]]> + + + + + + + + + + + + + + Builds an empty Dynamic Bloom filter. + @param vectorSize The number of bits in the vector. + @param nbHash The number of hash function to consider. + @param hashType type of the hashing function (see + {@link org.apache.hadoop.util.hash.Hash}). + @param nr The threshold for the maximum number of keys to record in a + dynamic Bloom filter row.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + dynamic Bloom filter, as defined in the INFOCOM 2006 paper. +

    + A dynamic Bloom filter (DBF) makes use of an s * m bit matrix but + each of the s rows is a standard Bloom filter. The creation + process of a DBF is iterative. At the start, the DBF is a 1 * m + bit matrix, i.e., it is composed of a single standard Bloom filter. + It assumes that nr elements are recorded in the + initial bit vector, where nr <= n (n is + the cardinality of the set A to record in the filter). +

    + As the size of A grows during the execution of the application, + several keys must be inserted in the DBF. When inserting a key into the DBF, + one must first get an active Bloom filter in the matrix. A Bloom filter is + active when the number of recorded keys, nr, is + strictly less than the current cardinality of A, n. + If an active Bloom filter is found, the key is inserted and + nr is incremented by one. On the other hand, if there + is no active Bloom filter, a new one is created (i.e., a new row is added to + the matrix) according to the current size of A and the element + is added in this new Bloom filter and the nr value of + this new Bloom filter is set to one. A given key is said to belong to the + DBF if the k positions are set to one in one of the matrix rows. +

    + Originally created by + European Commission One-Lab Project 034819. + + @see Filter The general behavior of a filter + @see BloomFilter A Bloom filter + + @see Theory and Network Applications of Dynamic Bloom Filters]]> + + + + + + + + + Builds a hash function that must obey to a given maximum number of returned values and a highest value. + @param maxValue The maximum highest returned value. + @param nbHash The number of resulting hashed values. + @param hashType type of the hashing function (see {@link Hash}).]]> + + + + + this hash function. A NOOP]]> + + + + + + + + + + + + + + + + + + + The idea is to randomly select a bit to reset.]]> + + + + + + The idea is to select the bit to reset that will generate the minimum + number of false negative.]]> + + + + + + The idea is to select the bit to reset that will remove the maximum number + of false positive.]]> + + + + + + The idea is to select the bit to reset that will, at the same time, remove + the maximum number of false positve while minimizing the amount of false + negative generated.]]> + + + + + Originally created by + European Commission One-Lab Project 034819.]]> + + + + + + + + + + + + + + this filter. + @param nbHash The number of hash function to consider. + @param hashType type of the hashing function (see + {@link org.apache.hadoop.util.hash.Hash}).]]> + + + + + + + + + this retouched Bloom filter. +

    + Invariant: if the false positive is null, nothing happens. + @param key The false positive key to add.]]> + + + + + + this retouched Bloom filter. + @param coll The collection of false positives.]]> + + + + + + this retouched Bloom filter. + @param keys The list of false positives.]]> + + + + + + this retouched Bloom filter. + @param keys The array of false positives.]]> + + + + + + + this retouched Bloom filter. + @param scheme The selective clearing scheme to apply.]]> + + + + + + + + + + + + retouched Bloom filter, as defined in the CoNEXT 2006 paper. +

    + It allows the removal of selected false positives at the cost of introducing + random false negatives, and with the benefit of eliminating some random false + positives at the same time. + +

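A toy sketch of the trade-off described above, with hypothetical names (not the org.apache.hadoop.util.bloom.RetouchedBloomFilter API or its RemoveScheme constants): clearing one bit of a reported false positive removes it from the filter, but may turn other keys that share that bit into false negatives.

    import java.util.BitSet;
    import java.util.Random;

    public class TinyRetouchedBloomFilter {
      private final BitSet bits;
      private final int vectorSize;
      private final int nbHash;
      private final Random random = new Random();

      public TinyRetouchedBloomFilter(int vectorSize, int nbHash) {
        this.bits = new BitSet(vectorSize);
        this.vectorSize = vectorSize;
        this.nbHash = nbHash;
      }

      private int position(String key, int i) {
        // toy hash; a real filter derives k independent positions per key
        return Math.floorMod(key.hashCode() * 31 + i * 0x85EBCA6B, vectorSize);
      }

      public void add(String key) {
        for (int i = 0; i < nbHash; i++) {
          bits.set(position(key, i));
        }
      }

      public boolean membershipTest(String key) {
        for (int i = 0; i < nbHash; i++) {
          if (!bits.get(position(key, i))) {
            return false;
          }
        }
        return true;
      }

      // RANDOM-style selective clearing: reset one randomly chosen bit of a key
      // reported as a false positive; smarter schemes pick the bit that removes
      // the most false positives or creates the fewest false negatives.
      public void selectiveClearing(String falsePositiveKey) {
        bits.clear(position(falsePositiveKey, random.nextInt(nbHash)));
      }
    }
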
    + Originally created by + European Commission One-Lab Project 034819. + + @see Filter The general behavior of a filter + @see BloomFilter A Bloom filter + @see RemoveScheme The different selective clearing algorithms + + @see Retouched Bloom Filters: Allowing Networked Applications to Trade Off Selected False Positives Against False Negatives]]> + + + + + + + + + + diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 737db05635c74..e346c3cd2c353 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-common - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Common Apache Hadoop Common jar @@ -37,7 +37,6 @@ wsce-site.xml - org.apache.hadoop.thirdparty @@ -48,6 +47,11 @@ hadoop-annotations compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + com.google.guava guava @@ -94,8 +98,8 @@ compile - javax.activation - javax.activation-api + jakarta.activation + jakarta.activation-api runtime @@ -276,6 +280,11 @@ sshd-core test + + org.apache.ftpserver + ftpserver-core + test + org.apache.htrace @@ -357,6 +366,16 @@ wildfly-openssl-java provided + + org.xerial.snappy + snappy-java + compile + + + org.lz4 + lz4-java + provided + @@ -390,7 +409,12 @@ src-compile-protoc - false + + false + + ProtobufRpcEngine.proto + + src-test-compile-protoc @@ -406,6 +430,9 @@ replace-generated-sources false + + **/ProtobufRpcEngineProtos.java + @@ -418,6 +445,14 @@ replace-sources false + + + **/ProtobufHelper.java + **/RpcWritable.java + **/ProtobufRpcEngineCallback.java + **/ProtobufRpcEngine.java + **/ProtobufRpcEngineProtos.java + @@ -547,11 +582,6 @@ src/main/native/m4/* src/test/empty-file src/test/all-tests - src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h - src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c - src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h - src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c - src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h src/main/native/gtest/**/* src/test/resources/test-untar.tgz src/test/resources/test.har/_SUCCESS @@ -620,10 +650,6 @@ false - - - - false @@ -677,11 +703,7 @@ ${project.build.directory}/native/javah ${sun.arch.data.model} ${require.bzip2} - ${require.snappy} ${require.zstd} - ${snappy.prefix} - ${snappy.lib} - ${snappy.include} ${zstd.prefix} ${zstd.lib} ${zstd.include} @@ -736,14 +758,9 @@ - - - false - false - true @@ -843,10 +860,6 @@ /nologo /p:Configuration=Release /p:OutDir=${project.build.directory}/bin/ - /p:CustomSnappyPrefix=${snappy.prefix} - /p:CustomSnappyLib=${snappy.lib} - /p:CustomSnappyInclude=${snappy.include} - /p:RequireSnappy=${require.snappy} /p:CustomZstdPrefix=${zstd.prefix} /p:CustomZstdLib=${zstd.lib} /p:CustomZstdInclude=${zstd.include} @@ -1010,7 +1023,79 @@ - + + + aarch64 + + false + + aarch64 + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source-legacy-protobuf + generate-sources + + add-source + + + + ${basedir}/src/main/arm-java + + + + + + + + + + + x86_64 + + false + + !aarch64 + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + src-compile-protoc-legacy + generate-sources + + compile + + + false + + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + false + ${basedir}/src/main/proto + ${project.build.directory}/generated-sources/java + false 
+ + ProtobufRpcEngine.proto + + + + + + + + diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt index 10591f6ce2aa8..d2ef03645a4ae 100644 --- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt +++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt @@ -67,33 +67,6 @@ else() endif() set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES}) -# Require snappy. -set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) -hadoop_set_find_shared_library_version("1") -find_library(SNAPPY_LIBRARY - NAMES snappy - PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/lib - ${CUSTOM_SNAPPY_PREFIX}/lib64 ${CUSTOM_SNAPPY_LIB}) -set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES}) -find_path(SNAPPY_INCLUDE_DIR - NAMES snappy.h - PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/include - ${CUSTOM_SNAPPY_INCLUDE}) -if(SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR) - get_filename_component(HADOOP_SNAPPY_LIBRARY ${SNAPPY_LIBRARY} NAME) - set(SNAPPY_SOURCE_FILES - "${SRC}/io/compress/snappy/SnappyCompressor.c" - "${SRC}/io/compress/snappy/SnappyDecompressor.c") - set(REQUIRE_SNAPPY ${REQUIRE_SNAPPY}) # Stop warning about unused variable. - message(STATUS "Found Snappy: ${SNAPPY_LIBRARY}") -else() - set(SNAPPY_INCLUDE_DIR "") - set(SNAPPY_SOURCE_FILES "") - if(REQUIRE_SNAPPY) - message(FATAL_ERROR "Required snappy library could not be found. SNAPPY_LIBRARY=${SNAPPY_LIBRARY}, SNAPPY_INCLUDE_DIR=${SNAPPY_INCLUDE_DIR}, CUSTOM_SNAPPY_INCLUDE_DIR=${CUSTOM_SNAPPY_INCLUDE_DIR}, CUSTOM_SNAPPY_PREFIX=${CUSTOM_SNAPPY_PREFIX}, CUSTOM_SNAPPY_INCLUDE=${CUSTOM_SNAPPY_INCLUDE}") - endif() -endif() - # Require zstandard SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) hadoop_set_find_shared_library_version("1") @@ -253,7 +226,6 @@ include_directories( ${JNI_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS} ${BZIP2_INCLUDE_DIR} - ${SNAPPY_INCLUDE_DIR} ${ISAL_INCLUDE_DIR} ${ZSTD_INCLUDE_DIR} ${OPENSSL_INCLUDE_DIR} @@ -264,12 +236,7 @@ configure_file(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h) set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) hadoop_add_dual_library(hadoop main/native/src/exception.c - ${SRC}/io/compress/lz4/Lz4Compressor.c - ${SRC}/io/compress/lz4/Lz4Decompressor.c - ${SRC}/io/compress/lz4/lz4.c - ${SRC}/io/compress/lz4/lz4hc.c ${ISAL_SOURCE_FILES} - ${SNAPPY_SOURCE_FILES} ${ZSTD_SOURCE_FILES} ${OPENSSL_SOURCE_FILES} ${SRC}/io/compress/zlib/ZlibCompressor.c diff --git a/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java b/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java new file mode 100644 index 0000000000000..28e28bf633784 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/arm-java/org/apache/hadoop/ipc/protobuf/ProtobufRpcEngineProtos.java @@ -0,0 +1,1163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// This class is added to the source tree because a protoc 2.5.0 executable +// for ARM is not available to generate the same code. +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: ProtobufRpcEngine.proto +package org.apache.hadoop.ipc.protobuf; + +public final class ProtobufRpcEngineProtos { + private ProtobufRpcEngineProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RequestHeaderProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string methodName = 1; + /** + * required string methodName = 1; + * + *

    +     ** Name of the RPC method
    +     * 
    + */ + boolean hasMethodName(); + /** + * required string methodName = 1; + * + *
    +     ** Name of the RPC method
    +     * 
    + */ + java.lang.String getMethodName(); + /** + * required string methodName = 1; + * + *
    +     ** Name of the RPC method
    +     * 
    + */ + com.google.protobuf.ByteString + getMethodNameBytes(); + + // required string declaringClassProtocolName = 2; + /** + * required string declaringClassProtocolName = 2; + * + *
    +     **
    +     * RPCs for a particular interface (i.e. protocol) are done using an
    +     * IPC connection that is set up using rpcProxy.
    +     * The rpcProxy has a declared protocol name that is
    +     * sent from client to server at connection time.
    +     *
    +     * Each Rpc call also sends a protocol name
    +     * (called declaringClassprotocolName). This name is usually the same
    +     * as the connection protocol name except in some cases.
    +     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +     * about the protocol reuse the connection but need to indicate that
    +     * the actual protocol is different (i.e. the protocol is
    +     * ProtocolInfoProto) since they reuse the connection; in this case
    +     * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +     * 
    + */ + boolean hasDeclaringClassProtocolName(); + /** + * required string declaringClassProtocolName = 2; + * + *
    +     **
    +     * RPCs for a particular interface (i.e. protocol) are done using an
    +     * IPC connection that is set up using rpcProxy.
    +     * The rpcProxy has a declared protocol name that is
    +     * sent from client to server at connection time.
    +     *
    +     * Each Rpc call also sends a protocol name
    +     * (called declaringClassprotocolName). This name is usually the same
    +     * as the connection protocol name except in some cases.
    +     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +     * about the protocol reuse the connection but need to indicate that
    +     * the actual protocol is different (i.e. the protocol is
    +     * ProtocolInfoProto) since they reuse the connection; in this case
    +     * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +     * 
    + */ + java.lang.String getDeclaringClassProtocolName(); + /** + * required string declaringClassProtocolName = 2; + * + *
    +     **
    +     * RPCs for a particular interface (i.e. protocol) are done using an
    +     * IPC connection that is set up using rpcProxy.
    +     * The rpcProxy has a declared protocol name that is
    +     * sent from client to server at connection time.
    +     *
    +     * Each Rpc call also sends a protocol name
    +     * (called declaringClassprotocolName). This name is usually the same
    +     * as the connection protocol name except in some cases.
    +     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +     * about the protocol reuse the connection but need to indicate that
    +     * the actual protocol is different (i.e. the protocol is
    +     * ProtocolInfoProto) since they reuse the connection; in this case
    +     * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +     * 
    + */ + com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes(); + + // required uint64 clientProtocolVersion = 3; + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +     ** protocol version of class declaring the called method
    +     * 
    + */ + boolean hasClientProtocolVersion(); + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +     ** protocol version of class declaring the called method
    +     * 
    + */ + long getClientProtocolVersion(); + } + /** + * Protobuf type {@code hadoop.common.RequestHeaderProto} + * + *
    +   **
    +   * This message is the header for the Protobuf Rpc Engine
    +   * when sending an RPC request from the RPC client to the RPC server.
    +   * The actual request (serialized as protobuf) follows this request.
    +   *
    +   * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
    +   * The normal RPC response header (see RpcHeader.proto) is sufficient.
    +   * 
    + */ + public static final class RequestHeaderProto extends + com.google.protobuf.GeneratedMessage + implements RequestHeaderProtoOrBuilder { + // Use RequestHeaderProto.newBuilder() to construct. + private RequestHeaderProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RequestHeaderProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RequestHeaderProto defaultInstance; + public static RequestHeaderProto getDefaultInstance() { + return defaultInstance; + } + + public RequestHeaderProto getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RequestHeaderProto( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + methodName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + clientProtocolVersion_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RequestHeaderProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RequestHeaderProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string methodName = 1; + public static final int METHODNAME_FIELD_NUMBER = 1; + private java.lang.Object methodName_; + /** + * required string methodName = 1; + * + *
    +     ** Name of the RPC method
    +     * 
    + */ + public boolean hasMethodName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string methodName = 1; + * + *
    +     ** Name of the RPC method
    +     * 
    + */ + public java.lang.String getMethodName() { + java.lang.Object ref = methodName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + methodName_ = s; + } + return s; + } + } + /** + * required string methodName = 1; + * + *
    +     ** Name of the RPC method
    +     * 
    + */ + public com.google.protobuf.ByteString + getMethodNameBytes() { + java.lang.Object ref = methodName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + methodName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string declaringClassProtocolName = 2; + public static final int DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER = 2; + private java.lang.Object declaringClassProtocolName_; + /** + * required string declaringClassProtocolName = 2; + * + *
    +     **
    +     * RPCs for a particular interface (i.e. protocol) are done using an
    +     * IPC connection that is set up using rpcProxy.
    +     * The rpcProxy has a declared protocol name that is
    +     * sent from client to server at connection time.
    +     *
    +     * Each Rpc call also sends a protocol name
    +     * (called declaringClassprotocolName). This name is usually the same
    +     * as the connection protocol name except in some cases.
    +     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +     * about the protocol reuse the connection but need to indicate that
    +     * the actual protocol is different (i.e. the protocol is
    +     * ProtocolInfoProto) since they reuse the connection; in this case
    +     * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +     * 
    + */ + public boolean hasDeclaringClassProtocolName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +     **
    +     * RPCs for a particular interface (i.e. protocol) are done using an
    +     * IPC connection that is set up using rpcProxy.
    +     * The rpcProxy has a declared protocol name that is
    +     * sent from client to server at connection time.
    +     *
    +     * Each Rpc call also sends a protocol name
    +     * (called declaringClassprotocolName). This name is usually the same
    +     * as the connection protocol name except in some cases.
    +     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +     * about the protocol reuse the connection but need to indicate that
    +     * the actual protocol is different (i.e. the protocol is
    +     * ProtocolInfoProto) since they reuse the connection; in this case
    +     * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +     * 
    + */ + public java.lang.String getDeclaringClassProtocolName() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + declaringClassProtocolName_ = s; + } + return s; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +     **
    +     * RPCs for a particular interface (i.e. protocol) are done using an
    +     * IPC connection that is set up using rpcProxy.
    +     * The rpcProxy has a declared protocol name that is
    +     * sent from client to server at connection time.
    +     *
    +     * Each Rpc call also sends a protocol name
    +     * (called declaringClassprotocolName). This name is usually the same
    +     * as the connection protocol name except in some cases.
    +     * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +     * about the protocol reuse the connection but need to indicate that
    +     * the actual protocol is different (i.e. the protocol is
    +     * ProtocolInfoProto) since they reuse the connection; in this case
    +     * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +     * 
    + */ + public com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + declaringClassProtocolName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 clientProtocolVersion = 3; + public static final int CLIENTPROTOCOLVERSION_FIELD_NUMBER = 3; + private long clientProtocolVersion_; + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +     ** protocol version of class declaring the called method
    +     * 
    + */ + public boolean hasClientProtocolVersion() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +     ** protocol version of class declaring the called method
    +     * 
    + */ + public long getClientProtocolVersion() { + return clientProtocolVersion_; + } + + private void initFields() { + methodName_ = ""; + declaringClassProtocolName_ = ""; + clientProtocolVersion_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMethodName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDeclaringClassProtocolName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasClientProtocolVersion()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMethodNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getDeclaringClassProtocolNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, clientProtocolVersion_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMethodNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getDeclaringClassProtocolNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, clientProtocolVersion_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)) { + return super.equals(obj); + } + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) obj; + + boolean result = true; + result = result && (hasMethodName() == other.hasMethodName()); + if (hasMethodName()) { + result = result && getMethodName() + .equals(other.getMethodName()); + } + result = result && (hasDeclaringClassProtocolName() == other.hasDeclaringClassProtocolName()); + if (hasDeclaringClassProtocolName()) { + result = result && getDeclaringClassProtocolName() + .equals(other.getDeclaringClassProtocolName()); + } + result = result && (hasClientProtocolVersion() == other.hasClientProtocolVersion()); + if (hasClientProtocolVersion()) { + result = result && (getClientProtocolVersion() + == other.getClientProtocolVersion()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasMethodName()) { + hash = (37 * hash) + 
METHODNAME_FIELD_NUMBER; + hash = (53 * hash) + getMethodName().hashCode(); + } + if (hasDeclaringClassProtocolName()) { + hash = (37 * hash) + DECLARINGCLASSPROTOCOLNAME_FIELD_NUMBER; + hash = (53 * hash) + getDeclaringClassProtocolName().hashCode(); + } + if (hasClientProtocolVersion()) { + hash = (37 * hash) + CLIENTPROTOCOLVERSION_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getClientProtocolVersion()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hadoop.common.RequestHeaderProto} + * + *
    +     **
    +     * This message is the header for the Protobuf Rpc Engine
    +     * when sending an RPC request from the RPC client to the RPC server.
    +     * The actual request (serialized as protobuf) follows this request.
    +     *
    +     * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
    +     * The normal RPC response header (see RpcHeader.proto) is sufficient.
    +     * 
    + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProtoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.class, org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.Builder.class); + } + + // Construct using org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + methodName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + declaringClassProtocolName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + clientProtocolVersion_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.internal_static_hadoop_common_RequestHeaderProto_descriptor; + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto getDefaultInstanceForType() { + return org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance(); + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto build() { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto buildPartial() { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto result = new org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.methodName_ = methodName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.declaringClassProtocolName_ = declaringClassProtocolName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.clientProtocolVersion_ = clientProtocolVersion_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) { + return 
mergeFrom((org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto other) { + if (other == org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto.getDefaultInstance()) return this; + if (other.hasMethodName()) { + bitField0_ |= 0x00000001; + methodName_ = other.methodName_; + onChanged(); + } + if (other.hasDeclaringClassProtocolName()) { + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = other.declaringClassProtocolName_; + onChanged(); + } + if (other.hasClientProtocolVersion()) { + setClientProtocolVersion(other.getClientProtocolVersion()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMethodName()) { + + return false; + } + if (!hasDeclaringClassProtocolName()) { + + return false; + } + if (!hasClientProtocolVersion()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string methodName = 1; + private java.lang.Object methodName_ = ""; + /** + * required string methodName = 1; + * + *
    +       ** Name of the RPC method
    +       * 
    + */ + public boolean hasMethodName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string methodName = 1; + * + *
    +       ** Name of the RPC method
    +       * 
    + */ + public java.lang.String getMethodName() { + java.lang.Object ref = methodName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + methodName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string methodName = 1; + * + *
    +       ** Name of the RPC method
    +       * 
    + */ + public com.google.protobuf.ByteString + getMethodNameBytes() { + java.lang.Object ref = methodName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + methodName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string methodName = 1; + * + *
    +       ** Name of the RPC method
    +       * 
    + */ + public Builder setMethodName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + methodName_ = value; + onChanged(); + return this; + } + /** + * required string methodName = 1; + * + *
    +       ** Name of the RPC method
    +       * 
    + */ + public Builder clearMethodName() { + bitField0_ = (bitField0_ & ~0x00000001); + methodName_ = getDefaultInstance().getMethodName(); + onChanged(); + return this; + } + /** + * required string methodName = 1; + * + *
    +       ** Name of the RPC method
    +       * 
    + */ + public Builder setMethodNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + methodName_ = value; + onChanged(); + return this; + } + + // required string declaringClassProtocolName = 2; + private java.lang.Object declaringClassProtocolName_ = ""; + /** + * required string declaringClassProtocolName = 2; + * + *
    +       **
    +       * RPCs for a particular interface (i.e. protocol) are done using an
    +       * IPC connection that is set up using rpcProxy.
    +       * The rpcProxy has a declared protocol name that is
    +       * sent from client to server at connection time.
    +       *
    +       * Each Rpc call also sends a protocol name
    +       * (called declaringClassprotocolName). This name is usually the same
    +       * as the connection protocol name except in some cases.
    +       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +       * about the protocol reuse the connection but need to indicate that
    +       * the actual protocol is different (i.e. the protocol is
    +       * ProtocolInfoProto) since they reuse the connection; in this case
    +       * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +       * 
    + */ + public boolean hasDeclaringClassProtocolName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +       **
    +       * RPCs for a particular interface (i.e. protocol) are done using an
    +       * IPC connection that is set up using rpcProxy.
    +       * The rpcProxy has a declared protocol name that is
    +       * sent from client to server at connection time.
    +       *
    +       * Each Rpc call also sends a protocol name
    +       * (called declaringClassprotocolName). This name is usually the same
    +       * as the connection protocol name except in some cases.
    +       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +       * about the protocol reuse the connection but need to indicate that
    +       * the actual protocol is different (i.e. the protocol is
    +       * ProtocolInfoProto) since they reuse the connection; in this case
    +       * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +       * 
    + */ + public java.lang.String getDeclaringClassProtocolName() { + java.lang.Object ref = declaringClassProtocolName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + declaringClassProtocolName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +       **
    +       * RPCs for a particular interface (i.e. protocol) are done using an
    +       * IPC connection that is set up using rpcProxy.
    +       * The rpcProxy has a declared protocol name that is
    +       * sent from client to server at connection time.
    +       *
    +       * Each Rpc call also sends a protocol name
    +       * (called declaringClassprotocolName). This name is usually the same
    +       * as the connection protocol name except in some cases.
    +       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +       * about the protocol reuse the connection but need to indicate that
    +       * the actual protocol is different (i.e. the protocol is
    +       * ProtocolInfoProto) since they reuse the connection; in this case
    +       * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +       * 
    + */ + public com.google.protobuf.ByteString + getDeclaringClassProtocolNameBytes() { + java.lang.Object ref = declaringClassProtocolName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + declaringClassProtocolName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +       **
    +       * RPCs for a particular interface (i.e. protocol) are done using an
    +       * IPC connection that is set up using rpcProxy.
    +       * The rpcProxy has a declared protocol name that is
    +       * sent from client to server at connection time.
    +       *
    +       * Each Rpc call also sends a protocol name
    +       * (called declaringClassprotocolName). This name is usually the same
    +       * as the connection protocol name except in some cases.
    +       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +       * about the protocol reuse the connection but need to indicate that
    +       * the actual protocol is different (i.e. the protocol is
    +       * ProtocolInfoProto) since they reuse the connection; in this case
    +       * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +       * 
    + */ + public Builder setDeclaringClassProtocolName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = value; + onChanged(); + return this; + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +       **
    +       * RPCs for a particular interface (i.e. protocol) are done using an
    +       * IPC connection that is set up using rpcProxy.
    +       * The rpcProxy has a declared protocol name that is
    +       * sent from client to server at connection time.
    +       *
    +       * Each Rpc call also sends a protocol name
    +       * (called declaringClassprotocolName). This name is usually the same
    +       * as the connection protocol name except in some cases.
    +       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +       * about the protocol reuse the connection but need to indicate that
    +       * the actual protocol is different (i.e. the protocol is
    +       * ProtocolInfoProto) since they reuse the connection; in this case
    +       * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +       * 
    + */ + public Builder clearDeclaringClassProtocolName() { + bitField0_ = (bitField0_ & ~0x00000002); + declaringClassProtocolName_ = getDefaultInstance().getDeclaringClassProtocolName(); + onChanged(); + return this; + } + /** + * required string declaringClassProtocolName = 2; + * + *
    +       **
    +       * RPCs for a particular interface (i.e. protocol) are done using an
    +       * IPC connection that is set up using rpcProxy.
    +       * The rpcProxy has a declared protocol name that is
    +       * sent from client to server at connection time.
    +       *
    +       * Each Rpc call also sends a protocol name
    +       * (called declaringClassprotocolName). This name is usually the same
    +       * as the connection protocol name except in some cases.
    +       * For example, metaProtocols such as ProtocolInfoProto which get metainfo
    +       * about the protocol reuse the connection but need to indicate that
    +       * the actual protocol is different (i.e. the protocol is
    +       * ProtocolInfoProto) since they reuse the connection; in this case
    +       * the declaringClassProtocolName field is set to the ProtocolInfoProto
    +       * 
    + */ + public Builder setDeclaringClassProtocolNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + declaringClassProtocolName_ = value; + onChanged(); + return this; + } + + // required uint64 clientProtocolVersion = 3; + private long clientProtocolVersion_ ; + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +       ** protocol version of class declaring the called method
    +       * 
    + */ + public boolean hasClientProtocolVersion() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +       ** protocol version of class declaring the called method
    +       * 
    + */ + public long getClientProtocolVersion() { + return clientProtocolVersion_; + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +       ** protocol version of class declaring the called method
    +       * 
    + */ + public Builder setClientProtocolVersion(long value) { + bitField0_ |= 0x00000004; + clientProtocolVersion_ = value; + onChanged(); + return this; + } + /** + * required uint64 clientProtocolVersion = 3; + * + *
    +       ** protocol version of class declaring the called method
    +       * 
    + */ + public Builder clearClientProtocolVersion() { + bitField0_ = (bitField0_ & ~0x00000004); + clientProtocolVersion_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hadoop.common.RequestHeaderProto) + } + + static { + defaultInstance = new RequestHeaderProto(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hadoop.common.RequestHeaderProto) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hadoop_common_RequestHeaderProto_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\027ProtobufRpcEngine.proto\022\rhadoop.common" + + "\"k\n\022RequestHeaderProto\022\022\n\nmethodName\030\001 \002" + + "(\t\022\"\n\032declaringClassProtocolName\030\002 \002(\t\022\035" + + "\n\025clientProtocolVersion\030\003 \002(\004B<\n\036org.apa" + + "che.hadoop.ipc.protobufB\027ProtobufRpcEngi" + + "neProtos\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hadoop_common_RequestHeaderProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hadoop_common_RequestHeaderProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hadoop_common_RequestHeaderProto_descriptor, + new java.lang.String[] { "MethodName", "DeclaringClassProtocolName", "ClientProtocolVersion", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index eb7285fb4e667..cff60129eb9fd 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -596,11 +596,6 @@ function hadoop_bootstrap YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"} MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"} MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"} - HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"} - HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"} - OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"} - OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"} - OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"} HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}} HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"} @@ -1342,7 +1337,7 @@ function hadoop_add_to_classpath_tools # shellcheck disable=SC1090 . "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" else - hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found." + hadoop_debug "Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found." 
fi if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index e43cd95b047ee..f4625f5999b1c 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -390,15 +390,6 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} # # export HDFS_DFSROUTER_OPTS="" -### -# Ozone Manager specific parameters -### -# Specify the JVM options to be used when starting the Ozone Manager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_OM_OPTS="" - ### # HDFS StorageContainerManager specific parameters ### diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 7f9ea462679b3..52d2c1ff038e6 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -282,13 +282,6 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex} #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log #log4j.appender.nodemanagerrequestlog.RetainDays=3 -#Http Server request logs for Ozone S3Gateway -log4j.logger.http.requests.s3gateway=INFO,s3gatewayrequestlog -log4j.appender.s3gatewayrequestlog=org.apache.hadoop.http.HttpRequestLogAppender -log4j.appender.s3gatewayrequestlog.Filename=${hadoop.log.dir}/jetty-s3gateway-yyyy_mm_dd.log -log4j.appender.s3gatewayrequestlog.RetainDays=3 - - # WebHdfs request log on datanodes # Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to # direct the log to a separate file. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java index cce744e5076f5..836f3819f8baf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java @@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.http.HttpServer2; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A servlet to print out the running configuration data. 
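
The Configuration.java hunk just below replaces the full reloadConfiguration() that addResource() used to trigger with an incremental load of only the newly added resource. A rough sketch of that pattern, using hypothetical names rather than Configuration's actual fields:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Properties;

    // Illustration only: parse just the resources added since the last load,
    // instead of clearing the properties and re-reading every resource.
    public class IncrementalConf {
      private final List<String> resources = new ArrayList<>();
      private Properties props;

      public synchronized void addResource(String resource) {
        resources.add(resource);
        if (props != null) {
          loadRange(props, resources.size() - 1);  // incremental path: new resource only
        }
        // if props is still null, the first getProps() call performs the full load
      }

      public synchronized Properties getProps() {
        if (props == null) {
          props = new Properties();
          loadRange(props, 0);                     // full load, starting at index 0
        }
        return props;
      }

      private void loadRange(Properties target, int startIdx) {
        for (int i = startIdx; i < resources.size(); i++) {
          // stand-in for parsing an XML configuration resource
          target.setProperty("loaded." + resources.get(i), "true");
        }
      }
    }
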
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 9a8841e701b96..aedde6b5e62bb 100755 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -24,7 +24,7 @@ import com.ctc.wstx.stax.WstxInputFactory; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.BufferedInputStream; import java.io.DataInput; @@ -40,6 +40,8 @@ import java.lang.ref.WeakReference; import java.net.InetSocketAddress; import java.net.JarURLConnection; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.net.URLConnection; import java.nio.file.Files; @@ -81,7 +83,7 @@ import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.commons.collections.map.UnmodifiableMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -105,8 +107,8 @@ import org.w3c.dom.Document; import org.w3c.dom.Element; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.isNotBlank; @@ -1025,11 +1027,11 @@ public synchronized void reloadConfiguration() { properties = null; // trigger reload finalParameters.clear(); // clear site-limits } - + private synchronized void addResourceObject(Resource resource) { resources.add(resource); // add to resources restrictSystemProps |= resource.isParserRestricted(); - reloadConfiguration(); + loadProps(properties, resources.size() - 1, false); } private static final int MAX_SUBST = 20; @@ -2870,12 +2872,27 @@ public Set getFinalParameters() { protected synchronized Properties getProps() { if (properties == null) { properties = new Properties(); - Map backup = updatingResource != null ? - new ConcurrentHashMap(updatingResource) : null; - loadResources(properties, resources, quietmode); + loadProps(properties, 0, true); + } + return properties; + } + /** + * Loads the resource at a given index into the properties. + * @param props the object containing the loaded properties. + * @param startIdx the index where the new resource has been added. + * @param fullReload flag whether we do complete reload of the conf instead + * of just loading the new resource. + */ + private synchronized void loadProps(final Properties props, + final int startIdx, final boolean fullReload) { + if (props != null) { + Map backup = + updatingResource != null + ? 
new ConcurrentHashMap<>(updatingResource) : null; + loadResources(props, resources, startIdx, fullReload, quietmode); if (overlay != null) { - properties.putAll(overlay); + props.putAll(overlay); if (backup != null) { for (Map.Entry item : overlay.entrySet()) { String key = (String) item.getKey(); @@ -2887,7 +2904,6 @@ protected synchronized Properties getProps() { } } } - return properties; } /** @@ -2989,14 +3005,16 @@ private XMLStreamReader parse(InputStream is, String systemIdStr, private void loadResources(Properties properties, ArrayList resources, + int startIdx, + boolean fullReload, boolean quiet) { - if(loadDefaults) { + if(loadDefaults && fullReload) { for (String resource : defaultResources) { loadResource(properties, new Resource(resource, false), quiet); } } - for (int i = 0; i < resources.size(); i++) { + for (int i = startIdx; i < resources.size(); i++) { Resource ret = loadResource(properties, resources.get(i), quiet); if (ret != null) { resources.set(i, ret); @@ -3243,7 +3261,15 @@ private void handleInclude() throws XMLStreamException, IOException { File href = new File(confInclude); if (!href.isAbsolute()) { // Included resources are relative to the current resource - File baseFile = new File(name).getParentFile(); + File baseFile; + + try { + baseFile = new File(new URI(name)); + } catch (IllegalArgumentException | URISyntaxException e) { + baseFile = new File(name); + } + + baseFile = baseFile.getParentFile(); href = new File(baseFile, href.getPath()); } if (!href.exists()) { @@ -3865,6 +3891,7 @@ public Map getValByRegex(String regex) { Pattern p = Pattern.compile(regex); Map result = new HashMap(); + List resultKeys = new ArrayList<>(); Matcher m; for(Map.Entry item: getProps().entrySet()) { @@ -3872,11 +3899,12 @@ public Map getValByRegex(String regex) { item.getValue() instanceof String) { m = p.matcher((String)item.getKey()); if(m.find()) { // match - result.put((String) item.getKey(), - substituteVars(getProps().getProperty((String) item.getKey()))); + resultKeys.add((String) item.getKey()); } } } + resultKeys.forEach(item -> + result.put(item, substituteVars(getProps().getProperty(item)))); return result; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 8cacbdcdac039..0352025ada154 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -18,9 +18,9 @@ package org.apache.hadoop.conf; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Time; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java index 3e52560259638..a1ddca6e20967 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index bcf4a65ec24d4..3061020f1e6fd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -31,8 +31,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index 9e601e26cf944..0c156e3548d21 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -30,7 +30,7 @@ import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ByteBufferPositionedReadable; @@ -46,9 +46,13 @@ import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.StreamCapabilitiesPolicy; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.util.StringUtils; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; + /** * CryptoInputStream decrypts data. It is not thread-safe. 
AES CTR mode is * required in order to ensure that the plain text and cipher text have a 1:1 @@ -66,7 +70,7 @@ public class CryptoInputStream extends FilterInputStream implements Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, ReadableByteChannel, CanUnbuffer, StreamCapabilities, - ByteBufferPositionedReadable { + ByteBufferPositionedReadable, IOStatisticsSource { private final byte[] oneByteBuf = new byte[1]; private final CryptoCodec codec; private final Decryptor decryptor; @@ -867,8 +871,16 @@ public boolean hasCapability(String capability) { + " does not expose its stream capabilities."); } return ((StreamCapabilities) in).hasCapability(capability); + case StreamCapabilities.IOSTATISTICS: + return (in instanceof StreamCapabilities) + && ((StreamCapabilities) in).hasCapability(capability); default: return false; } } + + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(in); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java index 8d11043937612..38c430fcd99c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java @@ -28,8 +28,13 @@ import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; /** * CryptoOutputStream encrypts data. It is not thread-safe. 
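The CryptoInputStream changes above wire the stream into the IOStatistics framework: the IOSTATISTICS capability probe and getIOStatistics() both delegate to the wrapped stream. A rough caller-side sketch follows; the class and method names are illustrative, and only statistics helpers already referenced in this patch are used.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.statistics.IOStatisticsLogging;

public final class ReadStatsProbe {

  private ReadStatsProbe() {
  }

  /**
   * Read a file to the end and report the stream's IOStatistics if the
   * stream chain (including any CryptoInputStream wrapper) exposes them.
   */
  public static String readAndReport(FileSystem fs, Path path)
      throws IOException {
    try (FSDataInputStream in = fs.open(path)) {
      byte[] buffer = new byte[4096];
      while (in.read(buffer) >= 0) {
        // drain the stream; a real caller would process the bytes
      }
      if (in.hasCapability(StreamCapabilities.IOSTATISTICS)) {
        // both the probe and the statistics are forwarded to the inner stream
        return String.valueOf(
            IOStatisticsLogging.ioStatisticsSourceToString(in));
      }
      return "stream does not publish IOStatistics";
    }
  }
}

The same pattern applies to CryptoOutputStream in the next hunk, which forwards both the capability probe and the statistics to its wrapped output stream.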
AES CTR mode is @@ -48,7 +53,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class CryptoOutputStream extends FilterOutputStream implements - Syncable, CanSetDropBehind, StreamCapabilities { + Syncable, CanSetDropBehind, StreamCapabilities, IOStatisticsSource { private final byte[] oneByteBuf = new byte[1]; private final CryptoCodec codec; private final Encryptor encryptor; @@ -308,9 +313,11 @@ private void freeBuffers() { @Override public boolean hasCapability(String capability) { - if (out instanceof StreamCapabilities) { - return ((StreamCapabilities) out).hasCapability(capability); - } - return false; + return StoreImplementationUtils.hasCapability(out, capability); + } + + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(out); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java index b55f84226d3cd..9d2b4b9e23389 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.util.CleanerUtil; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java index de0e5dd6268f2..1c670f76f4859 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java index a127925a7a538..0963cb6005ed8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.crypto.random.OpensslSecureRandom; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java index 0a2ba52e555e5..0fe81d7ca1087 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.NativeCodeLoader; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.PerformanceAdvisory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java index 7a66e1e4bab48..4f456e54a4d61 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java @@ -22,9 +22,9 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; /** * A KeyProviderExtension implementation providing a short lived diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java index 7951af56bc8f9..3c3099e113567 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java @@ -18,7 +18,7 @@ package org.apache.hadoop.crypto.key; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import javax.crypto.spec.SecretKeySpec; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index 00d7a7dfce0f7..8aa64e2ceb6ae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -29,7 +29,7 @@ import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.CryptoCodec; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java index 05d99ed0810fc..e23d8b8e4a774 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.crypto.key; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.Credentials; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index f2f6f1801c2dd..2cc011c0df34d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -25,7 +25,7 @@ import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index 71ed4557b357b..bc56f0e28676b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -79,9 +79,9 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import static org.apache.hadoop.util.KMSUtil.checkNotEmpty; import static org.apache.hadoop.util.KMSUtil.checkNotNull; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java index ee2295cff77f3..4d19ea32e7fc6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java @@ -50,8 +50,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A simple LoadBalancing KMSClientProvider that 
round-robins requests diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java index 7d26acbf21a03..5ec78b3bd50ba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java @@ -33,11 +33,11 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.base.Preconditions; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java index 1863f5ec2035f..a7a609ce440b6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.NativeCodeLoader; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.PerformanceAdvisory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Abortable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Abortable.java new file mode 100644 index 0000000000000..d2fd174795831 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Abortable.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Abort data being written to a stream, so that close() does + * not write the data. It is implemented by output streams in + * some object stores, and passed through {@link FSDataOutputStream}. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface Abortable { + + /** + * Abort the active operation without the output becoming visible. + * + * This is to provide ability to cancel the write on stream; once + * a stream is aborted, the write MUST NOT become visible. + * + * @throws UnsupportedOperationException if the operation is not supported. + * @return the result. + */ + AbortableResult abort(); + + /** + * Interface for the result of aborts; allows subclasses to extend + * (IOStatistics etc) or for future enhancements if ever needed. + */ + interface AbortableResult { + + /** + * Was the stream already closed/aborted? + * @return true if a close/abort operation had already + * taken place. + */ + boolean alreadyClosed(); + + /** + * Any exception caught during cleanup operations, + * exceptions whose raising/catching does not change + * the semantics of the abort. + * @return an exception or null. + */ + IOException anyCleanupException(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 1df68b647c99a..b911e0d508422 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -56,7 +56,7 @@ import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -865,6 +865,20 @@ public abstract FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException; + /** + * Synchronize client metadata state. + *

    + * In some FileSystem implementations such as HDFS metadata + * synchronization is essential to guarantee consistency of read requests + * particularly in HA setting. + * @throws IOException + * @throws UnsupportedOperationException + */ + public void msync() throws IOException, UnsupportedOperationException { + throw new UnsupportedOperationException(getClass().getCanonicalName() + + " does not support method msync"); + } + /** * The specification of this method matches that of * {@link FileContext#access(Path, FsAction)} @@ -1383,4 +1397,34 @@ public boolean hasPathCapability(final Path path, return false; } } + + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + methodNotSupported(); + return null; + } + + /** + * Helper method that throws an {@link UnsupportedOperationException} for the + * current {@link FileSystem} method being called. + */ + protected final void methodNotSupported() { + // The order of the stacktrace elements is (from top to bottom): + // - java.lang.Thread.getStackTrace + // - org.apache.hadoop.fs.FileSystem.methodNotSupported + // - + // therefore, to find out the current method name, we use the element at + // index 2. + String name = Thread.currentThread().getStackTrace()[2].getMethodName(); + throw new UnsupportedOperationException(getClass().getCanonicalName() + + " does not support method " + name); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java index 973b136bb3ab2..59345f5d25caf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java @@ -21,9 +21,14 @@ import java.io.EOFException; import java.io.FileDescriptor; import java.io.IOException; +import java.util.StringJoiner; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; /** @@ -33,7 +38,8 @@ @InterfaceAudience.Private @InterfaceStability.Unstable public class BufferedFSInputStream extends BufferedInputStream -implements Seekable, PositionedReadable, HasFileDescriptor { + implements Seekable, PositionedReadable, HasFileDescriptor, + IOStatisticsSource, StreamCapabilities { /** * Creates a BufferedFSInputStream * with the specified buffer size, @@ -126,4 +132,34 @@ public FileDescriptor getFileDescriptor() throws IOException { return null; } } + + /** + * If the inner stream supports {@link StreamCapabilities}, + * forward the probe to it. + * Otherwise: return false. + * + * @param capability string to query the stream support for. + * @return true if a capability is known to be supported. 
+ */ + @Override + public boolean hasCapability(final String capability) { + if (in instanceof StreamCapabilities) { + return ((StreamCapabilities) in).hasCapability(capability); + } else { + return false; + } + } + + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(in); + } + + @Override + public String toString() { + return new StringJoiner(", ", + BufferedFSInputStream.class.getSimpleName() + "[", "]") + .add("in=" + in) + .toString(); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java index c31c29b5b6d31..5708c906fe764 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.ByteBufferPool; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanSetDropBehind.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanSetDropBehind.java index 2e2d98b9c5462..0077838920a9e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanSetDropBehind.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanSetDropBehind.java @@ -36,6 +36,6 @@ public interface CanSetDropBehind { * UnsupportedOperationException If this stream doesn't support * setting the drop-behind. */ - public void setDropBehind(Boolean dropCache) + void setDropBehind(Boolean dropCache) throws IOException, UnsupportedOperationException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index cc9c284c9fa55..0256a58f46368 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -29,7 +29,7 @@ import java.util.List; import java.util.concurrent.CompletableFuture; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -38,11 +38,15 @@ import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSupport; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; +import static org.apache.hadoop.fs.impl.StoreImplementationUtils.isProbeForSyncable; /**************************************************************** * Abstract Checksumed FileSystem. 
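BufferedFSInputStream above now forwards hasCapability() to the stream it wraps and exposes that stream's IOStatistics; the point of the forwarding is that callers probe capabilities before relying on them. A minimal sketch of such a caller, with an illustrative class and method name:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.StreamCapabilities;

public final class SyncIfSupported {

  private SyncIfSupported() {
  }

  /**
   * Call hsync() only when the stream declares the capability; wrapper
   * streams are expected to forward this probe to whatever they wrap,
   * so the answer reflects the innermost stream.
   */
  public static boolean hsyncIfSupported(FSDataOutputStream out)
      throws IOException {
    if (out.hasCapability(StreamCapabilities.HSYNC)) {
      out.hsync();
      return true;
    }
    return false;
  }
}

ChecksumFileSystem's output summer in the following hunks deliberately answers false to Syncable probes, which is exactly the kind of answer a guard like this is meant to respect.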
@@ -134,7 +138,8 @@ private int getSumBufferSize(int bytesPerSum, int bufferSize) { * For open()'s FSInputStream * It verifies that data matches checksums. *******************************************************/ - private static class ChecksumFSInputChecker extends FSInputChecker { + private static class ChecksumFSInputChecker extends FSInputChecker implements + IOStatisticsSource { private ChecksumFileSystem fs; private FSDataInputStream datas; private FSDataInputStream sums; @@ -270,6 +275,17 @@ protected int readChunk(long pos, byte[] buf, int offset, int len, } return nread; } + + /** + * Get the IO Statistics of the nested stream, falling back to + * null if the stream does not implement the interface + * {@link IOStatisticsSource}. + * @return an IOStatistics instance or null + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(datas); + } } private static class FSDataBoundedInputStream extends FSDataInputStream { @@ -395,7 +411,8 @@ public static long getChecksumLength(long size, int bytesPerSum) { /** This class provides an output stream for a checksummed file. * It generates checksums for data. */ - private static class ChecksumFSOutputSummer extends FSOutputSummer { + private static class ChecksumFSOutputSummer extends FSOutputSummer + implements IOStatisticsSource, StreamCapabilities { private FSDataOutputStream datas; private FSDataOutputStream sums; private static final float CHKSUM_AS_FRACTION = 0.01f; @@ -449,6 +466,31 @@ protected void checkClosed() throws IOException { throw new ClosedChannelException(); } } + + /** + * Get the IO Statistics of the nested stream, falling back to + * null if the stream does not implement the interface + * {@link IOStatisticsSource}. + * @return an IOStatistics instance or null + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(datas); + } + + /** + * Probe the inner stream for a capability. + * Syncable operations are rejected before being passed down. + * @param capability string to query the stream support for. + * @return true if a capability is known to be supported. + */ + @Override + public boolean hasCapability(final String capability) { + if (isProbeForSyncable(capability)) { + return false; + } + return datas.hasCapability(capability); + } } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index a68012b06d2bc..0044b6ac6c317 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -164,6 +164,27 @@ public class CommonConfigurationKeysPublic { public static final String FS_AUTOMATIC_CLOSE_KEY = "fs.automatic.close"; /** Default value for FS_AUTOMATIC_CLOSE_KEY */ public static final boolean FS_AUTOMATIC_CLOSE_DEFAULT = true; + + /** + * Number of filesystems instances can be created in parallel. + *

    + * A higher number here does not necessarily improve performance, especially + * for object stores, where multiple threads may be attempting to create an FS + * instance for the same URI. + *

    + * Default value: {@value}. + */ + public static final String FS_CREATION_PARALLEL_COUNT = + "fs.creation.parallel.count"; + + /** + * Default value for {@link #FS_CREATION_PARALLEL_COUNT}. + *

    + * Default value: {@value}. + */ + public static final int FS_CREATION_PARALLEL_COUNT_DEFAULT = + 64; + /** * @see * @@ -988,5 +1009,14 @@ public class CommonConfigurationKeysPublic { public static final String HADOOP_PROMETHEUS_ENABLED = "hadoop.prometheus.endpoint.enabled"; public static final boolean HADOOP_PROMETHEUS_ENABLED_DEFAULT = false; + + /** + * @see + * + * core-default.xml + */ + public static final String HADOOP_HTTP_IDLE_TIMEOUT_MS_KEY = + "hadoop.http.idle_timeout.ms"; + public static final int HADOOP_HTTP_IDLE_TIMEOUT_MS_DEFAULT = 60000; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java index fb46ef81e36fa..df932df43aebd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java @@ -131,4 +131,19 @@ private CommonPathCapabilities() { @InterfaceStability.Unstable public static final String FS_EXPERIMENTAL_BATCH_LISTING = "fs.capability.batch.listing"; + + /** + * Does the store support multipart uploading? + * Value: {@value}. + */ + public static final String FS_MULTIPART_UPLOADER = + "fs.capability.multipart.uploader"; + + + /** + * Stream abort() capability implemented by {@link Abortable#abort()}. + * Value: {@value}. + */ + public static final String ABORTABLE_STREAM = + "fs.capability.outputstream.abortable"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java index 7a94088e4062d..30c2faeb24d1f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java @@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** Filesystem disk space usage statistics. 
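The hunks above add a tunable for parallel FileSystem instantiation (fs.creation.parallel.count, default 64) together with two new path capabilities. A short sketch of how client code might combine them, assuming a hypothetical helper class:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class StoreFeatureCheck {

  private StoreFeatureCheck() {
  }

  /**
   * Raise the limit on concurrent FileSystem instantiation, then ask the
   * store whether its output streams can be aborted.
   */
  public static boolean supportsAbortableOutput(Configuration conf, Path path)
      throws IOException {
    // default is 64; only raise it if many distinct filesystems are created at once
    conf.setInt(CommonConfigurationKeysPublic.FS_CREATION_PARALLEL_COUNT, 128);

    FileSystem fs = path.getFileSystem(conf);
    // true only for stores whose streams implement Abortable (e.g. some object stores)
    return fs.hasPathCapability(path, CommonPathCapabilities.ABORTABLE_STREAM);
  }
}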
* Uses the unix 'df' program to get mount points, and java.io.File for diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java index 6e374c97c3eda..89ac7c3e7cbbe 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 2feb9375255c3..193c52c60d949 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.lang.ref.WeakReference; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java index 31f82975899e1..b143a4cb63d19 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java @@ -29,6 +29,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSupport; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.util.IdentityHashStore; @@ -40,7 +44,7 @@ public class FSDataInputStream extends DataInputStream implements Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, CanUnbuffer, StreamCapabilities, - ByteBufferPositionedReadable { + ByteBufferPositionedReadable, IOStatisticsSource { /** * Map ByteBuffers that we have handed out to readers to ByteBufferPool * objects @@ -234,10 +238,7 @@ public void unbuffer() { @Override public boolean hasCapability(String capability) { - if (in instanceof StreamCapabilities) { - return ((StreamCapabilities) in).hasCapability(capability); - } - return false; + return StoreImplementationUtils.hasCapability(in, capability); } /** @@ -267,4 +268,15 @@ public void readFully(long position, ByteBuffer buf) throws IOException { "unsupported by " + in.getClass().getCanonicalName()); } } + + /** + * Get the IO Statistics of the nested stream, falling back to + * null if the stream does not implement the interface + * {@link IOStatisticsSource}. 
+ * @return an IOStatistics instance or null + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(in); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java index 5b604e58e2360..94c56b713c1eb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java @@ -24,13 +24,18 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSupport; /** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}. */ @InterfaceAudience.Public @InterfaceStability.Stable public class FSDataOutputStream extends DataOutputStream - implements Syncable, CanSetDropBehind, StreamCapabilities { + implements Syncable, CanSetDropBehind, StreamCapabilities, + IOStatisticsSource, Abortable { private final OutputStream wrappedStream; private static class PositionCache extends FilterOutputStream { @@ -122,10 +127,7 @@ public OutputStream getWrappedStream() { @Override public boolean hasCapability(String capability) { - if (wrappedStream instanceof StreamCapabilities) { - return ((StreamCapabilities) wrappedStream).hasCapability(capability); - } - return false; + return StoreImplementationUtils.hasCapability(wrappedStream, capability); } @Override // Syncable @@ -155,4 +157,32 @@ public void setDropBehind(Boolean dropBehind) throws IOException { "not support setting the drop-behind caching setting."); } } + + /** + * Get the IO Statistics of the nested stream, falling back to + * empty statistics if the stream does not implement the interface + * {@link IOStatisticsSource}. + * @return an IOStatistics instance. + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(wrappedStream); + } + + /** + * Invoke {@code abort()} on the wrapped stream if it + * is Abortable, otherwise raise an + * {@code UnsupportedOperationException}. + * @throws UnsupportedOperationException if not available. + * @return the result. 
+ */ + @Override + public AbortableResult abort() { + if (wrappedStream instanceof Abortable) { + return ((Abortable) wrappedStream).abort(); + } else { + throw new UnsupportedOperationException( + FSExceptionMessages.ABORTABLE_UNSUPPORTED); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java index 62a3182dfba20..7a6792817b750 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java @@ -28,7 +28,7 @@ import java.io.IOException; import java.util.EnumSet; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java index a8e7b71bb119c..f4616f1d72bc7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java @@ -51,4 +51,10 @@ public class FSExceptionMessages { public static final String PERMISSION_DENIED_BY_STICKY_BIT = "Permission denied by sticky bit"; + + /** + * A call was made to abort(), but it is not supported. + */ + public static final String ABORTABLE_UNSUPPORTED = + "Abortable.abort() is not supported"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java index 672ab15f16c3b..ad2642f7db963 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java @@ -21,9 +21,12 @@ import java.io.IOException; import java.io.InputStream; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.IOStatisticsLogging; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -134,4 +137,23 @@ public void readFully(long position, byte[] buffer) throws IOException { readFully(position, buffer, 0, buffer.length); } + + /** + * toString method returns the superclass toString, but if the subclass + * implements {@link IOStatisticsSource} then those statistics are + * extracted and included in the output. + * That is: statistics of subclasses are automatically reported. + * @return a string value. 
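With the change above, FSDataOutputStream forwards abort() to its wrapped stream and otherwise raises UnsupportedOperationException with the new ABORTABLE_UNSUPPORTED message. A guarded-abort sketch; the names are illustrative, and close() after abort() is assumed to be a no-op on abortable stores:

import java.io.IOException;

import org.apache.hadoop.fs.Abortable;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class AbortingWriter {

  private AbortingWriter() {
  }

  /**
   * Write some data and then abandon it. Where the store declares the
   * abortable-stream capability the data must never become visible;
   * otherwise fall back to close-and-delete.
   */
  public static void writeThenAbandon(FileSystem fs, Path path, byte[] data)
      throws IOException {
    boolean abortable =
        fs.hasPathCapability(path, CommonPathCapabilities.ABORTABLE_STREAM);
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(data);
      if (abortable) {
        // discard the pending output; close() afterwards is assumed to be a no-op
        Abortable.AbortableResult result = out.abort();
        if (result.anyCleanupException() != null) {
          throw result.anyCleanupException();
        }
      }
    }
    if (!abortable) {
      // the close() above made the file visible, so remove it explicitly
      fs.delete(path, false);
    }
  }
}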
+ */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(super.toString()); + sb.append('{'); + if (this instanceof IOStatisticsSource) { + sb.append(IOStatisticsLogging.ioStatisticsSourceToString( + (IOStatisticsSource) this)); + } + sb.append('}'); + return sb.toString(); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java index 2458b2f40d8d7..aaa19adf8c6a4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java @@ -33,7 +33,8 @@ */ @InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceStability.Unstable -abstract public class FSOutputSummer extends OutputStream { +abstract public class FSOutputSummer extends OutputStream implements + StreamCapabilities { // data checksum private final DataChecksum sum; // internal buffer for storing data before it is checksumed @@ -254,4 +255,9 @@ protected synchronized void setChecksumBufSize(int size) { protected synchronized void resetChecksumBufSize() { setChecksumBufSize(sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS); } + + @Override + public boolean hasCapability(String capability) { + return false; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index df93e89750ee0..422eae9337771 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -65,7 +65,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ShutdownHookManager; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.htrace.core.Tracer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -507,10 +508,9 @@ public static FileContext getLocalFSFileContext(final Configuration aConf) return getFileContext(FsConstants.LOCAL_FS_URI, aConf); } - /* This method is needed for tests. */ + @VisibleForTesting @InterfaceAudience.Private - @InterfaceStability.Unstable /* return type will change to AFS once - HADOOP-6223 is completed */ + @InterfaceStability.Unstable public AbstractFileSystem getDefaultFileSystem() { return defaultFS; } @@ -1249,6 +1249,16 @@ public FileStatus next(final AbstractFileSystem fs, final Path p) }.resolve(this, absF); } + /** + * Synchronize client metadata state. + * + * @throws IOException + * @throws UnsupportedOperationException + */ + public void msync() throws IOException, UnsupportedOperationException { + defaultFS.msync(); + } + /** * Checks if the user can access a path. The mode specifies which access * checks to perform. If the requested permissions are granted, then the @@ -2957,4 +2967,31 @@ public boolean hasPathCapability(Path path, String capability) (fs, p) -> fs.hasPathCapability(p, capability)); } + /** + * Return a set of server default configuration values based on path. 
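FileContext.msync() above simply delegates to the default AbstractFileSystem, whose base implementation (like the FileSystem one later in this patch) throws UnsupportedOperationException unless overridden, for example by HDFS. A defensive caller might therefore look like this sketch, with illustrative names:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;

public final class MsyncHelper {

  private MsyncHelper() {
  }

  /**
   * Synchronize client metadata state when the filesystem supports it
   * (for example HDFS in an HA setting); skip quietly otherwise.
   */
  public static void msyncIfSupported(FileSystem fs) throws IOException {
    try {
      fs.msync();
    } catch (UnsupportedOperationException e) {
      // the default implementation throws; treat it as "nothing to sync"
    }
  }
}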
+ * @param path path to fetch server defaults + * @return server default configuration values for path + * @throws IOException an I/O error occurred + */ + public FsServerDefaults getServerDefaults(final Path path) + throws IOException { + return FsLinkResolution.resolve(this, + fixRelativePart(path), + (fs, p) -> fs.getServerDefaults(p)); + } + + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. + */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + return FsLinkResolution.resolve(this, + fixRelativePart(basePath), + (fs, p) -> fs.createMultipartUploader(p)); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java index 4cfce2eed6c2c..5444103855d26 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java @@ -24,8 +24,8 @@ import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * FileEncryptionInfo encapsulates all the encryption-related information for diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 358db744e65be..528f6c270f4ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -21,6 +21,7 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InterruptedIOException; import java.lang.ref.WeakReference; import java.lang.ref.ReferenceQueue; import java.net.URI; @@ -44,6 +45,7 @@ import java.util.Stack; import java.util.TreeSet; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; @@ -75,6 +77,7 @@ import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.util.ClassUtil; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.ReflectionUtils; @@ -83,12 +86,12 @@ import org.apache.htrace.core.Tracer; import org.apache.htrace.core.TraceScope; -import com.google.common.base.Preconditions; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; @@ -132,22 +135,35 @@ * New methods may be marked as Unstable or Evolving for their initial release, * as a warning that they are new and may change based on the * experience of use in applications. + *

    * Important note for developers - * - * If you're making changes here to the public API or protected methods, + *

    + * If you are making changes here to the public API or protected methods, * you must review the following subclasses and make sure that * they are filtering/passing through new methods as appropriate. + *

    * - * {@link FilterFileSystem}: methods are passed through. + * {@link FilterFileSystem}: methods are passed through. If not, + * then {@code TestFilterFileSystem.MustNotImplement} must be + * updated with the unsupported interface. + * Furthermore, if the new API's support is probed for via + * {@link #hasPathCapability(Path, String)}, then + * {@link FilterFileSystem#hasPathCapability(Path, String)} + * must always return false. + *

    * {@link ChecksumFileSystem}: checksums are created and * verified. + *

    * {@code TestHarFileSystem} will need its {@code MustNotImplement} * interface updated. + *

    * * There are some external places your changes will break things. * Do co-ordinate changes here. + *

    * * HBase: HBoss + *

    * Hive: HiveShim23 * {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java} * @@ -187,7 +203,7 @@ public abstract class FileSystem extends Configured public static final String USER_HOME_PREFIX = "/user"; /** FileSystem cache. */ - static final Cache CACHE = new Cache(); + static final Cache CACHE = new Cache(new Configuration()); /** The key this instance is stored under in the cache. */ private Cache.Key key; @@ -607,6 +623,7 @@ public static LocalFileSystem newInstanceLocal(Configuration conf) * @throws IOException a problem arose closing one or more filesystem. */ public static void closeAll() throws IOException { + debugLogFileSystemClose("closeAll", ""); CACHE.closeAll(); } @@ -617,10 +634,24 @@ public static void closeAll() throws IOException { * @throws IOException a problem arose closing one or more filesystem. */ public static void closeAllForUGI(UserGroupInformation ugi) - throws IOException { + throws IOException { + debugLogFileSystemClose("closeAllForUGI", "UGI: " + ugi); CACHE.closeAll(ugi); } + private static void debugLogFileSystemClose(String methodName, + String additionalInfo) { + if (LOGGER.isDebugEnabled()) { + Throwable throwable = new Throwable().fillInStackTrace(); + LOGGER.debug("FileSystem.{}() by method: {}); {}", methodName, + throwable.getStackTrace()[2], additionalInfo); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("FileSystem.{}() full stack trace:", methodName, + throwable); + } + } + } + /** * Qualify a path to one which uses this FileSystem and, if relative, * made absolute. @@ -2200,7 +2231,9 @@ private void fetchMore() throws IOException { @Override @SuppressWarnings("unchecked") public T next() throws IOException { - Preconditions.checkState(hasNext(), "No more items in iterator"); + if (!hasNext()) { + throw new NoSuchElementException("No more items in iterator"); + } if (i == entries.getEntries().length) { fetchMore(); } @@ -2556,9 +2589,15 @@ public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) */ @Override public void close() throws IOException { + debugLogFileSystemClose("close", "Key: " + key + "; URI: " + getUri() + + "; Object Identity Hash: " + + Integer.toHexString(System.identityHashCode(this))); // delete all files that were marked as delete-on-exit. - processDeleteOnExit(); - CACHE.remove(this.key, this); + try { + processDeleteOnExit(); + } finally { + CACHE.remove(this.key, this); + } } /** @@ -2641,6 +2680,20 @@ public short getDefaultReplication(Path path) { */ public abstract FileStatus getFileStatus(Path f) throws IOException; + /** + * Synchronize client metadata state. + *

    + * In some FileSystem implementations such as HDFS metadata + * synchronization is essential to guarantee consistency of read requests + * particularly in HA setting. + * @throws IOException + * @throws UnsupportedOperationException + */ + public void msync() throws IOException, UnsupportedOperationException { + throw new UnsupportedOperationException(getClass().getCanonicalName() + + " does not support method msync"); + } + /** * Checks if the user can access a path. The mode specifies which access * checks to perform. If the requested permissions are granted, then the @@ -3405,7 +3458,9 @@ public static Class getFileSystemClass(String scheme, private static FileSystem createFileSystem(URI uri, Configuration conf) throws IOException { Tracer tracer = FsTracer.get(conf); - try(TraceScope scope = tracer.newScope("FileSystem#createFileSystem")) { + try(TraceScope scope = tracer.newScope("FileSystem#createFileSystem"); + DurationInfo ignored = + new DurationInfo(LOGGER, false, "Creating FS %s", uri)) { scope.addKVAnnotation("scheme", uri.getScheme()); Class clazz = getFileSystemClass(uri.getScheme(), conf); @@ -3428,15 +3483,39 @@ private static FileSystem createFileSystem(URI uri, Configuration conf) } /** Caching FileSystem objects. */ - static class Cache { + static final class Cache { private final ClientFinalizer clientFinalizer = new ClientFinalizer(); private final Map map = new HashMap<>(); private final Set toAutoClose = new HashSet<>(); + /** Semaphore used to serialize creation of new FS instances. */ + private final Semaphore creatorPermits; + + /** + * Counter of the number of discarded filesystem instances + * in this cache. Primarily for testing, but it could possibly + * be made visible as some kind of metric. + */ + private final AtomicLong discardedInstances = new AtomicLong(0); + /** A variable that makes all objects in the cache unique. */ private static AtomicLong unique = new AtomicLong(1); + /** + * Instantiate. The configuration is used to read the + * count of permits issued for concurrent creation + * of filesystem instances. + * @param conf configuration + */ + Cache(final Configuration conf) { + int permits = conf.getInt(FS_CREATION_PARALLEL_COUNT, + FS_CREATION_PARALLEL_COUNT_DEFAULT); + checkArgument(permits > 0, "Invalid value of %s: %s", + FS_CREATION_PARALLEL_COUNT, permits); + creatorPermits = new Semaphore(permits); + } + FileSystem get(URI uri, Configuration conf) throws IOException{ Key key = new Key(uri, conf); return getInternal(uri, conf, key); @@ -3470,33 +3549,86 @@ private FileSystem getInternal(URI uri, Configuration conf, Key key) if (fs != null) { return fs; } - - fs = createFileSystem(uri, conf); - final long timeout = conf.getTimeDuration(SERVICE_SHUTDOWN_TIMEOUT, - SERVICE_SHUTDOWN_TIMEOUT_DEFAULT, - ShutdownHookManager.TIME_UNIT_DEFAULT); - synchronized (this) { // refetch the lock again - FileSystem oldfs = map.get(key); - if (oldfs != null) { // a file system is created while lock is releasing - fs.close(); // close the new file system - return oldfs; // return the old file system - } - - // now insert the new file system into the map - if (map.isEmpty() - && !ShutdownHookManager.get().isShutdownInProgress()) { - ShutdownHookManager.get().addShutdownHook(clientFinalizer, - SHUTDOWN_HOOK_PRIORITY, timeout, - ShutdownHookManager.TIME_UNIT_DEFAULT); + // fs not yet created, acquire lock + // to construct an instance. 
+ try (DurationInfo d = new DurationInfo(LOGGER, false, + "Acquiring creator semaphore for %s", uri)) { + creatorPermits.acquire(); + } catch (InterruptedException e) { + // acquisition was interrupted; convert to an IOE. + throw (IOException)new InterruptedIOException(e.toString()) + .initCause(e); + } + FileSystem fsToClose = null; + try { + // See if FS was instantiated by another thread while waiting + // for the permit. + synchronized (this) { + fs = map.get(key); } - fs.key = key; - map.put(key, fs); - if (conf.getBoolean( - FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { - toAutoClose.add(key); + if (fs != null) { + LOGGER.debug("Filesystem {} created while awaiting semaphore", uri); + return fs; } - return fs; + // create the filesystem + fs = createFileSystem(uri, conf); + final long timeout = conf.getTimeDuration(SERVICE_SHUTDOWN_TIMEOUT, + SERVICE_SHUTDOWN_TIMEOUT_DEFAULT, + ShutdownHookManager.TIME_UNIT_DEFAULT); + // any FS to close outside of the synchronized section + synchronized (this) { // lock on the Cache object + + // see if there is now an entry for the FS, which happens + // if another thread's creation overlapped with this one. + FileSystem oldfs = map.get(key); + if (oldfs != null) { + // a file system was created in a separate thread. + // save the FS reference to close outside all locks, + // and switch to returning the oldFS + fsToClose = fs; + fs = oldfs; + } else { + // register the clientFinalizer if needed and shutdown isn't + // already active + if (map.isEmpty() + && !ShutdownHookManager.get().isShutdownInProgress()) { + ShutdownHookManager.get().addShutdownHook(clientFinalizer, + SHUTDOWN_HOOK_PRIORITY, timeout, + ShutdownHookManager.TIME_UNIT_DEFAULT); + } + // insert the new file system into the map + fs.key = key; + map.put(key, fs); + if (conf.getBoolean( + FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) { + toAutoClose.add(key); + } + } + } // end of synchronized block + } finally { + // release the creator permit. + creatorPermits.release(); } + if (fsToClose != null) { + LOGGER.debug("Duplicate FS created for {}; discarding {}", + uri, fs); + discardedInstances.incrementAndGet(); + // close the new file system + // note this will briefly remove and reinstate "fsToClose" from + // the map. It is done in a synchronized block so will not be + // visible to others. + IOUtils.cleanupWithLogger(LOGGER, fsToClose); + } + return fs; + } + + /** + * Get the count of discarded instances. + * @return the new instance. + */ + @VisibleForTesting + long getDiscardedInstances() { + return discardedInstances.get(); } synchronized void remove(Key key, FileSystem fs) { @@ -4643,4 +4775,17 @@ public CompletableFuture build() throws IOException { } + /** + * Create a multipart uploader. + * @param basePath file path under which all files are uploaded + * @return a MultipartUploaderBuilder object to build the uploader + * @throws IOException if some early checks cause IO failures. + * @throws UnsupportedOperationException if support is checked early. 
+ */ + @InterfaceStability.Unstable + public MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException { + methodNotSupported(); + return null; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java index 43c23abadea44..f717e03692378 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 7bc93f9bf5db8..73ca6e65216e7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -398,6 +398,12 @@ public static boolean copy(FileSystem srcFS, FileStatus srcStatus, Configuration conf) throws IOException { Path src = srcStatus.getPath(); dst = checkDest(src.getName(), dstFS, dst, overwrite); + + if (srcFS.makeQualified(src).equals(dstFS.makeQualified(dst))) { + throw new PathOperationException("Source (" + src + ") and destination " + + "(" + dst + ") are equal in the copy command."); + } + if (srcStatus.isDirectory()) { checkDependencies(srcFS, src, dstFS, dst); if (!dstFS.mkdirs(dst)) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index cf12ea3898a7f..607aa263622f6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -41,6 +41,8 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; + /**************************************************************** * A FilterFileSystem contains * some other file system, which it uses as @@ -460,6 +462,11 @@ public FileStatus getFileStatus(Path f) throws IOException { return fs.getFileStatus(f); } + @Override + public void msync() throws IOException, UnsupportedOperationException { + fs.msync(); + } + @Override public void access(Path path, FsAction mode) throws AccessControlException, FileNotFoundException, IOException { @@ -728,7 +735,16 @@ protected CompletableFuture openFileWithOptions( @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { - return fs.hasPathCapability(path, capability); + switch (validatePathCapabilityArgs(makeQualified(path), capability)) { + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: + case CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING: + // operations known to 
be unsupported, irrespective of what + // the wrapped class implements. + return false; + default: + // the feature is not implemented. + return fs.hasPathCapability(path, capability); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index e197506edc88b..7d979b37b4a50 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -124,6 +124,11 @@ public FileStatus getFileStatus(Path f) return myFs.getFileStatus(f); } + @Override + public void msync() throws IOException, UnsupportedOperationException { + myFs.msync(); + } + @Override public void access(Path path, FsAction mode) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -448,4 +453,10 @@ public boolean hasPathCapability(final Path path, throws IOException { return myFs.hasPathCapability(path, capability); } + + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return myFs.createMultipartUploader(basePath); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java index cfef1c3827917..603454210644d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java @@ -42,4 +42,7 @@ public interface FsConstants { */ public static final URI VIEWFS_URI = URI.create("viewfs:///"); public static final String VIEWFS_SCHEME = "viewfs"; + String FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN = + "fs.viewfs.overload.scheme.target.%s.impl"; + String VIEWFS_TYPE = "viewfs"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsTracer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsTracer.java index e422336739a44..6fab4bdfebc99 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsTracer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsTracer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java index c5429d2370250..11b3e91e86c3a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java @@ -24,7 +24,7 @@ import java.net.URL; import java.net.URLConnection; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java index 2dba525e5d9d1..0cf3b62bccfe9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java @@ -23,7 +23,7 @@ import java.util.NoSuchElementException; import java.util.TreeMap; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index f301f22057925..9cdcb4ac4acd3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Implementation of {@link FileSystem#globStatus(Path, PathFilter)}. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java index 5f4c4a236e96c..7e12d0a11e953 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java @@ -35,6 +35,7 @@ import java.net.URISyntaxException; import java.net.URLDecoder; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; @@ -513,41 +514,22 @@ private void fileStatusesInIndex(HarStatus parent, List statuses) if (!parentString.endsWith(Path.SEPARATOR)){ parentString += Path.SEPARATOR; } - Path harPath = new Path(parentString); - int harlen = harPath.depth(); - final Map cache = new TreeMap(); - - for (HarStatus hstatus : metadata.archive.values()) { - String child = hstatus.getName(); - if ((child.startsWith(parentString))) { - Path thisPath = new Path(child); - if (thisPath.depth() == harlen + 1) { - statuses.add(toFileStatus(hstatus, cache)); - } - } + + for (String child: parent.children) { + Path p = new Path(parentString + child); + statuses.add(toFileStatus(metadata.archive.get(p))); } } /** * Combine the status stored in the index and the underlying status. * @param h status stored in the index - * @param cache caching the underlying file statuses * @return the combined file status * @throws IOException */ - private FileStatus toFileStatus(HarStatus h, - Map cache) throws IOException { - FileStatus underlying = null; - if (cache != null) { - underlying = cache.get(h.partName); - } - if (underlying == null) { - final Path p = h.isDir? archivePath: new Path(archivePath, h.partName); - underlying = fs.getFileStatus(p); - if (cache != null) { - cache.put(h.partName, underlying); - } - } + private FileStatus toFileStatus(HarStatus h) throws IOException { + final Path p = h.isDir ? 
archivePath : new Path(archivePath, h.partName); + FileStatus underlying = metadata.getPartFileStatus(p); long modTime = 0; int version = metadata.getVersion(); @@ -658,7 +640,7 @@ public long getModificationTime() { @Override public FileStatus getFileStatus(Path f) throws IOException { HarStatus hstatus = getFileHarStatus(f); - return toFileStatus(hstatus, null); + return toFileStatus(hstatus); } private HarStatus getFileHarStatus(Path f) throws IOException { @@ -676,6 +658,11 @@ private HarStatus getFileHarStatus(Path f) throws IOException { return hstatus; } + @Override + public void msync() throws IOException, UnsupportedOperationException { + fs.msync(); + } + /** * @return null since no checksum algorithm is implemented. */ @@ -810,7 +797,7 @@ public FileStatus[] listStatus(Path f) throws IOException { if (hstatus.isDir()) { fileStatusesInIndex(hstatus, statuses); } else { - statuses.add(toFileStatus(hstatus, null)); + statuses.add(toFileStatus(hstatus)); } return statuses.toArray(new FileStatus[statuses.size()]); @@ -1138,7 +1125,8 @@ private class HarMetaData { List stores = new ArrayList(); Map archive = new HashMap(); - private Map partFileStatuses = new HashMap(); + // keys are always the internal har path. + private Map partFileStatuses = new ConcurrentHashMap<>(); public HarMetaData(FileSystem fs, Path masterIndexPath, Path archiveIndexPath) { this.fs = fs; @@ -1146,16 +1134,23 @@ public HarMetaData(FileSystem fs, Path masterIndexPath, Path archiveIndexPath) { this.archiveIndexPath = archiveIndexPath; } - public FileStatus getPartFileStatus(Path partPath) throws IOException { + public FileStatus getPartFileStatus(Path path) throws IOException { + Path partPath = getPathInHar(path); FileStatus status; status = partFileStatuses.get(partPath); if (status == null) { - status = fs.getFileStatus(partPath); + status = fs.getFileStatus(path); partFileStatuses.put(partPath, status); } return status; } + private void addPartFileStatuses(Path path) throws IOException { + for (FileStatus stat : fs.listStatus(path)) { + partFileStatuses.put(getPathInHar(stat.getPath()), stat); + } + } + public long getMasterIndexTimestamp() { return masterIndexTimestamp; } @@ -1212,16 +1207,22 @@ private void parseMetaData() throws IOException { try { FileStatus archiveStat = fs.getFileStatus(archiveIndexPath); archiveIndexTimestamp = archiveStat.getModificationTime(); - LineReader aLin; + + // pre-populate part cache. 
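The HarFileSystem changes above replace one getFileStatus() round trip per part file with a single directory listing that seeds a ConcurrentHashMap, which the pre-population call below then fills. A hedged sketch of the same caching idea against the public FileSystem API; the wrapper class and its method names are invented for illustration:

    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative cache of FileStatus entries seeded by one listing call. */
    final class StatusCache {
      private final FileSystem fs;
      private final Map<Path, FileStatus> statuses = new ConcurrentHashMap<>();

      StatusCache(FileSystem fs) {
        this.fs = fs;
      }

      /** One listing for the whole directory instead of one call per file. */
      void preload(Path dir) throws IOException {
        for (FileStatus st : fs.listStatus(dir)) {
          statuses.put(st.getPath(), st);
        }
      }

      /** Served from the cache when possible; falls back to getFileStatus(). */
      FileStatus status(Path file) throws IOException {
        FileStatus st = statuses.get(file);
        if (st == null) {
          st = fs.getFileStatus(file);
          statuses.put(file, st);
        }
        return st;
      }
    }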
+ addPartFileStatuses(archiveIndexPath.getParent()); + LineReader aLin = null; // now start reading the real index file + long pos = -1; for (Store s: stores) { - read = 0; - aIn.seek(s.begin); - aLin = new LineReader(aIn, getConf()); - while (read + s.begin < s.end) { - int tmp = aLin.readLine(line); - read += tmp; + if (pos != s.begin) { + pos = s.begin; + aIn.seek(s.begin); + aLin = new LineReader(aIn, getConf()); + } + + while (pos < s.end) { + pos += aLin.readLine(line); String lineFeed = line.toString(); String[] parsed = lineFeed.split(" "); parsed[0] = decodeFileName(parsed[0]); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java index 8b47dfeb9a7ce..30f793dadfec3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java @@ -29,7 +29,7 @@ import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import static java.nio.file.Files.createLink; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java new file mode 100644 index 0000000000000..2db33eead9288 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InternalOperations.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; + + +/** + * This method allows access to Package-scoped operations from classes + * in org.apache.hadoop.fs.impl and other file system implementations + * in the hadoop modules. + * This is absolutely not for used by any other application or library. 
+ */ +@InterfaceAudience.Private +public class InternalOperations { + + @SuppressWarnings("deprecation") // rename w/ OVERWRITE + public void rename(FileSystem fs, final Path src, final Path dst, + final Options.Rename...options) throws IOException { + fs.rename(src, dst, options); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java index 7ed987eed90dd..dcb76b50b3429 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,45 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.fs; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; - -import static com.google.common.base.Preconditions.checkArgument; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; /** * MultipartUploader is an interface for copying files multipart and across - * multiple nodes. Users should: - *
<ol>
- *   <li>Initialize an upload.</li>
- *   <li>Upload parts in any order.</li>
- *   <li>Complete the upload in order to have it materialize in the destination
- *   FS.</li>
- * </ol>
+ * multiple nodes.
+ * <p>
    + * The interface extends {@link IOStatisticsSource} so that there is no + * need to cast an instance to see if is a source of statistics. + * However, implementations MAY return null for their actual statistics. */ -@InterfaceAudience.Private +@InterfaceAudience.Public @InterfaceStability.Unstable -public abstract class MultipartUploader implements Closeable { - public static final Logger LOG = - LoggerFactory.getLogger(MultipartUploader.class); +public interface MultipartUploader extends Closeable, + IOStatisticsSource { - /** - * Perform any cleanup. - * The upload is not required to support any operations after this. - * @throws IOException problems on close. - */ - @Override - public void close() throws IOException { - } /** * Initialize a multipart upload. @@ -61,94 +48,64 @@ public void close() throws IOException { * @return unique identifier associating part uploads. * @throws IOException IO failure */ - public abstract UploadHandle initialize(Path filePath) throws IOException; + CompletableFuture startUpload(Path filePath) + throws IOException; /** * Put part as part of a multipart upload. * It is possible to have parts uploaded in any order (or in parallel). - * @param filePath Target path for upload (same as {@link #initialize(Path)}). + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param partNumber Index of the part relative to others. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). * @param inputStream Data for this part. Implementations MUST close this * stream after reading in the data. - * @param partNumber Index of the part relative to others. - * @param uploadId Identifier from {@link #initialize(Path)}. * @param lengthInBytes Target length to read from the stream. * @return unique PartHandle identifier for the uploaded part. * @throws IOException IO failure */ - public abstract PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) + CompletableFuture putPart( + UploadHandle uploadId, + int partNumber, + Path filePath, + InputStream inputStream, + long lengthInBytes) throws IOException; /** * Complete a multipart upload. - * @param filePath Target path for upload (same as {@link #initialize(Path)}. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param filePath Target path for upload (as {@link #startUpload(Path)}. * @param handles non-empty map of part number to part handle. - * from {@link #putPart(Path, InputStream, int, UploadHandle, long)}. - * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * from {@link #putPart(UploadHandle, int, Path, InputStream, long)}. * @return unique PathHandle identifier for the uploaded file. * @throws IOException IO failure */ - public abstract PathHandle complete(Path filePath, - Map handles, - UploadHandle multipartUploadId) + CompletableFuture complete( + UploadHandle uploadId, + Path filePath, + Map handles) throws IOException; /** * Aborts a multipart upload. - * @param filePath Target path for upload (same as {@link #initialize(Path)}. - * @param multipartUploadId Identifier from {@link #initialize(Path)}. + * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param filePath Target path for upload (same as {@link #startUpload(Path)}. 
* @throws IOException IO failure + * @return a future; the operation will have completed */ - public abstract void abort(Path filePath, UploadHandle multipartUploadId) + CompletableFuture abort(UploadHandle uploadId, Path filePath) throws IOException; /** - * Utility method to validate uploadIDs. - * @param uploadId Upload ID - * @throws IllegalArgumentException invalid ID - */ - protected void checkUploadId(byte[] uploadId) - throws IllegalArgumentException { - checkArgument(uploadId != null, "null uploadId"); - checkArgument(uploadId.length > 0, - "Empty UploadId is not valid"); - } - - /** - * Utility method to validate partHandles. - * @param partHandles handles - * @throws IllegalArgumentException if the parts are invalid + * Best effort attempt to aborts multipart uploads under a path. + * Not all implementations support this, and those which do may + * be vulnerable to eventually consistent listings of current uploads + * -some may be missed. + * @param path path to abort uploads under. + * @return a future to the number of entries aborted; + * -1 if aborting is unsupported + * @throws IOException IO failure */ - protected void checkPartHandles(Map partHandles) { - checkArgument(!partHandles.isEmpty(), - "Empty upload"); - partHandles.keySet() - .stream() - .forEach(key -> - checkArgument(key > 0, - "Invalid part handle index %s", key)); - } + CompletableFuture abortUploadsUnderPath(Path path) throws IOException; - /** - * Check all the arguments to the - * {@link #putPart(Path, InputStream, int, UploadHandle, long)} operation. - * @param filePath Target path for upload (same as {@link #initialize(Path)}). - * @param inputStream Data for this part. Implementations MUST close this - * stream after reading in the data. - * @param partNumber Index of the part relative to others. - * @param uploadId Identifier from {@link #initialize(Path)}. - * @param lengthInBytes Target length to read from the stream. - * @throws IllegalArgumentException invalid argument - */ - protected void checkPutArguments(Path filePath, - InputStream inputStream, - int partNumber, - UploadHandle uploadId, - long lengthInBytes) throws IllegalArgumentException { - checkArgument(filePath != null, "null filePath"); - checkArgument(inputStream != null, "null inputStream"); - checkArgument(partNumber > 0, "Invalid part number: %d", partNumber); - checkArgument(uploadId != null, "null uploadId"); - checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes); - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java new file mode 100644 index 0000000000000..381bfaa07f6d1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderBuilder.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
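Putting the interface above together with the createMultipartUploader() builder added to FileSystem earlier in this patch, a client would drive the asynchronous API roughly as follows. This is an illustrative sketch only: the generic future types (UploadHandle, PartHandle, PathHandle) are assumed from the signatures shown, and the target FileSystem must actually implement the builder, since the FileSystem base class rejects the call.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ExecutionException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.MultipartUploader;
    import org.apache.hadoop.fs.PartHandle;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathHandle;
    import org.apache.hadoop.fs.UploadHandle;

    public final class MultipartUploadExample {
      public static void main(String[] args)
          throws IOException, InterruptedException, ExecutionException {
        Configuration conf = new Configuration();
        Path dest = new Path(args[0]);          // destination in the store
        FileSystem fs = dest.getFileSystem(conf);
        byte[] data = "part one".getBytes(StandardCharsets.UTF_8);

        try (MultipartUploader uploader =
                 fs.createMultipartUploader(dest).build()) {
          // 1. start the upload; every call returns a CompletableFuture.
          UploadHandle upload = uploader.startUpload(dest).get();
          // 2. upload parts, in any order or in parallel; numbering starts at 1.
          PartHandle part = uploader.putPart(upload, 1, dest,
              new ByteArrayInputStream(data), data.length).get();
          // 3. complete with the map of part number to part handle.
          Map<Integer, PartHandle> parts = new HashMap<>();
          parts.put(1, part);
          PathHandle finalFile = uploader.complete(upload, dest, parts).get();
          System.out.println("Completed upload: " + finalFile);
        }
      }
    }

Because parts are addressed by opaque handles, they may be uploaded from different processes or hosts; only the caller of complete() needs the full map of part handles.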
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Builder interface for Multipart readers. + * @param + * @param + */ +public interface MultipartUploaderBuilder> + extends FSBuilder { + + /** + * Set permission for the file. + */ + B permission(@Nonnull FsPermission perm); + + /** + * Set the size of the buffer to be used. + */ + B bufferSize(int bufSize); + + /** + * Set replication factor. + */ + B replication(short replica); + + /** + * Set block size. + */ + B blockSize(long blkSize); + + /** + * Create an FSDataOutputStream at the specified path. + */ + B create(); + + /** + * Set to true to overwrite the existing file. + * Set it to false, an exception will be thrown when calling {@link #build()} + * if the file exists. + */ + B overwrite(boolean overwrite); + + /** + * Append to an existing file (optional operation). + */ + B append(); + + /** + * Set checksum opt. + */ + B checksumOpt(@Nonnull Options.ChecksumOpt chksumOpt); + + /** + * Create the FSDataOutputStream to write on the file system. + * + * @throws IllegalArgumentException if the parameters are not valid. + * @throws IOException on errors when file system creates or appends the file. + */ + S build() throws IllegalArgumentException, IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java deleted file mode 100644 index e35b6bf18bbd6..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploaderFactory.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Iterator; -import java.util.ServiceLoader; - -/** - * {@link ServiceLoader}-driven uploader API for storage services supporting - * multipart uploads. 
- */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public abstract class MultipartUploaderFactory { - public static final Logger LOG = - LoggerFactory.getLogger(MultipartUploaderFactory.class); - - /** - * Multipart Uploaders listed as services. - */ - private static ServiceLoader serviceLoader = - ServiceLoader.load(MultipartUploaderFactory.class, - MultipartUploaderFactory.class.getClassLoader()); - - // Iterate through the serviceLoader to avoid lazy loading. - // Lazy loading would require synchronization in concurrent use cases. - static { - Iterator iterServices = serviceLoader.iterator(); - while (iterServices.hasNext()) { - iterServices.next(); - } - } - - /** - * Get the multipart loader for a specific filesystem. - * @param fs filesystem - * @param conf configuration - * @return an uploader, or null if one was found. - * @throws IOException failure during the creation process. - */ - public static MultipartUploader get(FileSystem fs, Configuration conf) - throws IOException { - MultipartUploader mpu = null; - for (MultipartUploaderFactory factory : serviceLoader) { - mpu = factory.createMultipartUploader(fs, conf); - if (mpu != null) { - break; - } - } - return mpu; - } - - protected abstract MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java index 043f84612dc8b..80d173e905ed5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index cf2210575da15..21c69b78ca3f6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.BufferedOutputStream; import java.io.DataOutput; @@ -40,13 +40,20 @@ import java.nio.file.attribute.FileTime; import java.util.Arrays; import java.util.EnumSet; +import java.util.Locale; import java.util.Optional; import java.util.StringTokenizer; +import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.BufferedIOStatisticsOutputStream; +import 
org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.util.Progressable; @@ -54,6 +61,14 @@ import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_EXCEPTIONS; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SKIP_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SKIP_OPERATIONS; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_WRITE_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_WRITE_EXCEPTIONS; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; /**************************************************************** * Implement the FileSystem API for the raw local filesystem. @@ -64,6 +79,7 @@ public class RawLocalFileSystem extends FileSystem { static final URI NAME = URI.create("file:///"); private Path workingDir; + private long defaultBlockSize; // Temporary workaround for HADOOP-9652. private static boolean useDeprecatedFileStatus = true; @@ -100,17 +116,36 @@ public File pathToFile(Path path) { public void initialize(URI uri, Configuration conf) throws IOException { super.initialize(uri, conf); setConf(conf); + defaultBlockSize = getDefaultBlockSize(new Path(uri)); } /******************************************************* * For open()'s FSInputStream. *******************************************************/ - class LocalFSFileInputStream extends FSInputStream implements HasFileDescriptor { + class LocalFSFileInputStream extends FSInputStream implements + HasFileDescriptor, IOStatisticsSource, StreamCapabilities { private FileInputStream fis; private long position; + /** + * Minimal set of counters. + */ + private final IOStatisticsStore ioStatistics = iostatisticsStore() + .withCounters( + STREAM_READ_BYTES, + STREAM_READ_EXCEPTIONS, + STREAM_READ_SEEK_OPERATIONS, + STREAM_READ_SKIP_OPERATIONS, + STREAM_READ_SKIP_BYTES) + .build(); + + /** Reference to the bytes read counter for slightly faster counting. */ + private final AtomicLong bytesRead; + public LocalFSFileInputStream(Path f) throws IOException { fis = new FileInputStream(pathToFile(f)); + bytesRead = ioStatistics.getCounterReference( + STREAM_READ_BYTES); } @Override @@ -133,8 +168,8 @@ public boolean seekToNewSource(long targetPos) throws IOException { return false; } - /* - * Just forward to the fis + /** + * Just forward to the fis. 
*/ @Override public int available() throws IOException { return fis.available(); } @@ -150,9 +185,11 @@ public int read() throws IOException { if (value >= 0) { this.position++; statistics.incrementBytesRead(1); + bytesRead.addAndGet(1); } return value; } catch (IOException e) { // unexpected exception + ioStatistics.incrementCounter(STREAM_READ_EXCEPTIONS); throw new FSError(e); // assume native fs error } } @@ -166,9 +203,11 @@ public int read(byte[] b, int off, int len) throws IOException { if (value > 0) { this.position += value; statistics.incrementBytesRead(value); + bytesRead.addAndGet(value); } return value; } catch (IOException e) { // unexpected exception + ioStatistics.incrementCounter(STREAM_READ_EXCEPTIONS); throw new FSError(e); // assume native fs error } } @@ -187,18 +226,22 @@ public int read(long position, byte[] b, int off, int len) int value = fis.getChannel().read(bb, position); if (value > 0) { statistics.incrementBytesRead(value); + ioStatistics.incrementCounter(STREAM_READ_BYTES, value); } return value; } catch (IOException e) { + ioStatistics.incrementCounter(STREAM_READ_EXCEPTIONS); throw new FSError(e); } } @Override public long skip(long n) throws IOException { + ioStatistics.incrementCounter(STREAM_READ_SKIP_OPERATIONS); long value = fis.skip(n); if (value > 0) { this.position += value; + ioStatistics.incrementCounter(STREAM_READ_SKIP_BYTES, value); } return value; } @@ -207,6 +250,23 @@ public long skip(long n) throws IOException { public FileDescriptor getFileDescriptor() throws IOException { return fis.getFD(); } + + @Override + public boolean hasCapability(String capability) { + // a bit inefficient, but intended to make it easier to add + // new capabilities. + switch (capability.toLowerCase(Locale.ENGLISH)) { + case StreamCapabilities.IOSTATISTICS: + return true; + default: + return false; + } + } + + @Override + public IOStatistics getIOStatistics() { + return ioStatistics; + } } @Override @@ -231,9 +291,19 @@ public FSDataInputStream open(PathHandle fd, int bufferSize) /********************************************************* * For create()'s FSOutputStream. *********************************************************/ - class LocalFSFileOutputStream extends OutputStream { + final class LocalFSFileOutputStream extends OutputStream implements + IOStatisticsSource, StreamCapabilities, Syncable { private FileOutputStream fos; - + + /** + * Minimal set of counters. 
+ */ + private final IOStatisticsStore ioStatistics = iostatisticsStore() + .withCounters( + STREAM_WRITE_BYTES, + STREAM_WRITE_EXCEPTIONS) + .build(); + private LocalFSFileOutputStream(Path f, boolean append, FsPermission permission) throws IOException { File file = pathToFile(f); @@ -273,7 +343,9 @@ private LocalFSFileOutputStream(Path f, boolean append, public void write(byte[] b, int off, int len) throws IOException { try { fos.write(b, off, len); + ioStatistics.incrementCounter(STREAM_WRITE_BYTES, len); } catch (IOException e) { // unexpected exception + ioStatistics.incrementCounter(STREAM_WRITE_EXCEPTIONS); throw new FSError(e); // assume native fs error } } @@ -282,10 +354,44 @@ public void write(byte[] b, int off, int len) throws IOException { public void write(int b) throws IOException { try { fos.write(b); + ioStatistics.incrementCounter(STREAM_WRITE_BYTES); } catch (IOException e) { // unexpected exception + ioStatistics.incrementCounter(STREAM_WRITE_EXCEPTIONS); throw new FSError(e); // assume native fs error } } + + @Override + public void hflush() throws IOException { + flush(); + } + + /** + * HSync calls sync on fhe file descriptor after a local flush() call. + * @throws IOException failure + */ + @Override + public void hsync() throws IOException { + flush(); + fos.getFD().sync(); + } + + @Override + public boolean hasCapability(String capability) { + // a bit inefficient, but intended to make it easier to add + // new capabilities. + switch (capability.toLowerCase(Locale.ENGLISH)) { + case StreamCapabilities.IOSTATISTICS: + return true; + default: + return StoreImplementationUtils.isProbeForSyncable(capability); + } + } + + @Override + public IOStatistics getIOStatistics() { + return ioStatistics; + } } @Override @@ -318,8 +424,8 @@ private FSDataOutputStream create(Path f, boolean overwrite, if (parent != null && !mkdirs(parent)) { throw new IOException("Mkdirs failed to create " + parent.toString()); } - return new FSDataOutputStream(new BufferedOutputStream( - createOutputStreamWithMode(f, false, permission), bufferSize), + return new FSDataOutputStream(new BufferedIOStatisticsOutputStream( + createOutputStreamWithMode(f, false, permission), bufferSize, true), statistics); } @@ -340,8 +446,8 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) { throw new FileAlreadyExistsException("File already exists: " + f); } - return new FSDataOutputStream(new BufferedOutputStream( - createOutputStreamWithMode(f, false, permission), bufferSize), + return new FSDataOutputStream(new BufferedIOStatisticsOutputStream( + createOutputStreamWithMode(f, false, permission), bufferSize, true), statistics); } @@ -518,7 +624,12 @@ public FileStatus[] listStatus(Path f) throws IOException { } return new FileStatus[] { new DeprecatedRawLocalFileStatus(localf, - getDefaultBlockSize(f), this) }; + defaultBlockSize, this) }; + } + + @Override + public boolean exists(Path f) throws IOException { + return pathToFile(f).exists(); } protected boolean mkOneDir(File p2f) throws IOException { @@ -663,7 +774,7 @@ private FileStatus deprecatedGetFileStatus(Path f) throws IOException { File path = pathToFile(f); if (path.exists()) { return new DeprecatedRawLocalFileStatus(pathToFile(f), - getDefaultBlockSize(f), this); + defaultBlockSize, this); } else { throw new FileNotFoundException("File " + f + " does not exist"); } @@ -1051,7 +1162,7 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) private 
FileStatus getNativeFileLinkStatus(final Path f, boolean dereference) throws IOException { checkPath(f); - Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this); + Stat stat = new Stat(f, defaultBlockSize, dereference, this); FileStatus status = stat.getFileStatus(); return status; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java index 5e80a140175e6..d2300872eb146 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Shell; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Wrapper for the Unix stat(1) command. Used to workaround the lack of diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java index 74631b5695537..2efe4566344ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java @@ -19,6 +19,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; import java.util.Iterator; @@ -27,15 +28,16 @@ * instance. */ @InterfaceAudience.Public +@InterfaceStability.Stable public abstract class StorageStatistics { /** * These are common statistic names. - * + *
<p>
* The following names are considered general and preserved across different
* StorageStatistics classes. When implementing a new StorageStatistics, it is
* highly recommended to use the common statistic names.
- *
+ * <p>
* When adding new common statistic name constants, please make them unique.
* By convention, they are implicitly unique:
* <ul>
@@ -43,39 +45,46 @@ public abstract class StorageStatistics {
*   underscores.
*   <li>the value of the constants are lowercase of the constant names.
* </ul>
    + * See {@link StoreStatisticNames} for the field names used here + * and elsewhere. */ @InterfaceStability.Evolving public interface CommonStatisticNames { // The following names are for file system operation invocations - String OP_APPEND = "op_append"; - String OP_COPY_FROM_LOCAL_FILE = "op_copy_from_local_file"; - String OP_CREATE = "op_create"; - String OP_CREATE_NON_RECURSIVE = "op_create_non_recursive"; - String OP_DELETE = "op_delete"; - String OP_EXISTS = "op_exists"; - String OP_GET_CONTENT_SUMMARY = "op_get_content_summary"; - String OP_GET_DELEGATION_TOKEN = "op_get_delegation_token"; - String OP_GET_FILE_CHECKSUM = "op_get_file_checksum"; - String OP_GET_FILE_STATUS = "op_get_file_status"; - String OP_GET_STATUS = "op_get_status"; - String OP_GLOB_STATUS = "op_glob_status"; - String OP_IS_FILE = "op_is_file"; - String OP_IS_DIRECTORY = "op_is_directory"; - String OP_LIST_FILES = "op_list_files"; - String OP_LIST_LOCATED_STATUS = "op_list_located_status"; - String OP_LIST_STATUS = "op_list_status"; - String OP_MKDIRS = "op_mkdirs"; - String OP_MODIFY_ACL_ENTRIES = "op_modify_acl_entries"; - String OP_OPEN = "op_open"; - String OP_REMOVE_ACL = "op_remove_acl"; - String OP_REMOVE_ACL_ENTRIES = "op_remove_acl_entries"; - String OP_REMOVE_DEFAULT_ACL = "op_remove_default_acl"; - String OP_RENAME = "op_rename"; - String OP_SET_ACL = "op_set_acl"; - String OP_SET_OWNER = "op_set_owner"; - String OP_SET_PERMISSION = "op_set_permission"; - String OP_SET_TIMES = "op_set_times"; - String OP_TRUNCATE = "op_truncate"; + String OP_APPEND = StoreStatisticNames.OP_APPEND; + String OP_COPY_FROM_LOCAL_FILE = + StoreStatisticNames.OP_COPY_FROM_LOCAL_FILE; + String OP_CREATE = StoreStatisticNames.OP_CREATE; + String OP_CREATE_NON_RECURSIVE = + StoreStatisticNames.OP_CREATE_NON_RECURSIVE; + String OP_DELETE = StoreStatisticNames.OP_DELETE; + String OP_EXISTS = StoreStatisticNames.OP_EXISTS; + String OP_GET_CONTENT_SUMMARY = + StoreStatisticNames.OP_GET_CONTENT_SUMMARY; + String OP_GET_DELEGATION_TOKEN = + StoreStatisticNames.OP_GET_DELEGATION_TOKEN; + String OP_GET_FILE_CHECKSUM = StoreStatisticNames.OP_GET_FILE_CHECKSUM; + String OP_GET_FILE_STATUS = StoreStatisticNames.OP_GET_FILE_STATUS; + String OP_GET_STATUS = StoreStatisticNames.OP_GET_STATUS; + String OP_GLOB_STATUS = StoreStatisticNames.OP_GLOB_STATUS; + String OP_IS_FILE = StoreStatisticNames.OP_IS_FILE; + String OP_IS_DIRECTORY = StoreStatisticNames.OP_IS_DIRECTORY; + String OP_LIST_FILES = StoreStatisticNames.OP_LIST_FILES; + String OP_LIST_LOCATED_STATUS = + StoreStatisticNames.OP_LIST_LOCATED_STATUS; + String OP_LIST_STATUS = StoreStatisticNames.OP_LIST_STATUS; + String OP_MKDIRS = StoreStatisticNames.OP_MKDIRS; + String OP_MODIFY_ACL_ENTRIES = StoreStatisticNames.OP_MODIFY_ACL_ENTRIES; + String OP_OPEN = StoreStatisticNames.OP_OPEN; + String OP_REMOVE_ACL = StoreStatisticNames.OP_REMOVE_ACL; + String OP_REMOVE_ACL_ENTRIES = StoreStatisticNames.OP_REMOVE_ACL_ENTRIES; + String OP_REMOVE_DEFAULT_ACL = StoreStatisticNames.OP_REMOVE_DEFAULT_ACL; + String OP_RENAME = StoreStatisticNames.OP_RENAME; + String OP_SET_ACL = StoreStatisticNames.OP_SET_ACL; + String OP_SET_OWNER = StoreStatisticNames.OP_SET_OWNER; + String OP_SET_PERMISSION = StoreStatisticNames.OP_SET_PERMISSION; + String OP_SET_TIMES = StoreStatisticNames.OP_SET_TIMES; + String OP_TRUNCATE = StoreStatisticNames.OP_TRUNCATE; } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java index e68e7b351ed78..861178019505e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java @@ -34,7 +34,11 @@ public interface StreamCapabilities { /** * Stream hflush capability implemented by {@link Syncable#hflush()}. + * + * Use the {@link #HSYNC} probe to check for the support of Syncable; + * it's that presence of {@code hsync()} which matters. */ + @Deprecated String HFLUSH = "hflush"; /** @@ -71,6 +75,18 @@ public interface StreamCapabilities { */ String PREADBYTEBUFFER = "in:preadbytebuffer"; + /** + * IOStatisticsSource API. + */ + String IOSTATISTICS = "iostatistics"; + + /** + * Stream abort() capability implemented by {@link Abortable#abort()}. + * This matches the Path Capability + * {@link CommonPathCapabilities#ABORTABLE_STREAM}. + */ + String ABORTABLE_STREAM = CommonPathCapabilities.ABORTABLE_STREAM; + /** * Capabilities that a stream can support and be queried for. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java index 7ec3509ce1df6..9cd458592ca22 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java @@ -23,20 +23,24 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -/** This interface for flush/sync operation. */ +/** + * This is the interface for flush/sync operations. + * Consult the Hadoop filesystem specification for the definition of the + * semantics of these operations. + */ @InterfaceAudience.Public -@InterfaceStability.Evolving +@InterfaceStability.Stable public interface Syncable { - + /** Flush out the data in client's user buffer. After the return of * this call, new readers will see the data. * @throws IOException if any error occurs */ - public void hflush() throws IOException; - + void hflush() throws IOException; + /** Similar to posix fsync, flush out the data in client's user buffer * all the way to the disk device (but the disk may have it in its cache). 
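A short sketch of how callers are expected to use these probes: query hasCapability() on the stream for HSYNC before calling hsync(), as the deprecation note above advises, and probe the new IOSTATISTICS capability the same way. The path handling and overwrite flag below are illustrative only.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StreamCapabilities;

    public final class SyncProbeExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);
        FileSystem fs = path.getFileSystem(conf);
        try (FSDataOutputStream out = fs.create(path, true)) {
          out.write("hello".getBytes(StandardCharsets.UTF_8));
          // Probe for hsync() rather than hflush(): per the updated javadoc,
          // it is the presence of hsync() which matters.
          if (out.hasCapability(StreamCapabilities.HSYNC)) {
            out.hsync();             // durable flush to the device
          } else {
            out.flush();             // best effort on stores without sync
          }
          // The new "iostatistics" capability can be probed the same way.
          if (out.hasCapability(StreamCapabilities.IOSTATISTICS)) {
            System.out.println("stream publishes IOStatistics");
          }
        }
      }
    }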
* @throws IOException if error occurs */ - public void hsync() throws IOException; + void hsync() throws IOException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java index 18972ea3ecf79..7682992d42590 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java @@ -38,7 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java index 3d5b6af794682..2497ded48e7e9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java index e15968dd6d273..bfd4daffc7cad 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The value of XAttr is byte[], this class is to diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java index 4b144bfddf6c6..6899bb8d87426 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java @@ -20,11 +20,12 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.ConnectException; import java.net.URI; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.net.ftp.FTP; import org.apache.commons.net.ftp.FTPClient; import org.apache.commons.net.ftp.FTPFile; @@ -41,6 +42,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import 
org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; @@ -110,7 +112,9 @@ public void initialize(URI uri, Configuration conf) throws IOException { // get // get port information from uri, (overrides info in conf) int port = uri.getPort(); - port = (port == -1) ? FTP.DEFAULT_PORT : port; + if(port == -1){ + port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT); + } conf.setInt(FS_FTP_HOST_PORT, port); // get user/password information from URI (overrides info in conf) @@ -340,8 +344,19 @@ public FSDataOutputStream create(Path file, FsPermission permission, // file. The FTP client connection is closed when close() is called on the // FSDataOutputStream. client.changeWorkingDirectory(parent.toUri().getPath()); - FSDataOutputStream fos = new FSDataOutputStream(client.storeFileStream(file - .getName()), statistics) { + OutputStream outputStream = client.storeFileStream(file.getName()); + + if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { + // The ftpClient is an inconsistent state. Must close the stream + // which in turn will logout and disconnect from FTP server + if (outputStream != null) { + IOUtils.closeStream(outputStream); + } + disconnect(client); + throw new IOException("Unable to create file: " + file + ", Aborting"); + } + + FSDataOutputStream fos = new FSDataOutputStream(outputStream, statistics) { @Override public void close() throws IOException { super.close(); @@ -356,12 +371,6 @@ public void close() throws IOException { } } }; - if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { - // The ftpClient is an inconsistent state. Must close the stream - // which in turn will logout and disconnect from FTP server - fos.close(); - throw new IOException("Unable to create file: " + file + ", Aborting"); - } return fos; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java index 5fc92e97be76c..9cf8b3dc4d203 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java @@ -26,7 +26,7 @@ import java.util.Optional; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -36,8 +36,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathHandle; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Builder for filesystem/filecontext operations of various kinds, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java new file mode 100644 index 0000000000000..ed4bcc84e96f9 --- /dev/null +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UploadHandle; + +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; + +/** + * Standard base class for Multipart Uploaders. + */ +public abstract class AbstractMultipartUploader implements MultipartUploader { + + /** + * Base path of upload. + */ + private final Path basePath; + + /** + * Instantiate. + * @param basePath base path + */ + protected AbstractMultipartUploader(final Path basePath) { + this.basePath = Objects.requireNonNull(basePath, "null path"); + } + + /** + * Perform any cleanup. + * The upload is not required to support any operations after this. + * @throws IOException problems on close. + */ + @Override + public void close() throws IOException { + } + + protected Path getBasePath() { + return basePath; + } + + /** + * Validate a path. + * @param path path to check. + */ + protected void checkPath(Path path) { + Objects.requireNonNull(path, "null path"); + Preconditions.checkArgument(path.toString().startsWith(basePath.toString()), + "Path %s is not under %s", path, basePath); + } + + /** + * Utility method to validate uploadIDs. + * @param uploadId Upload ID + * @throws IllegalArgumentException invalid ID + */ + protected void checkUploadId(byte[] uploadId) + throws IllegalArgumentException { + checkArgument(uploadId != null, "null uploadId"); + checkArgument(uploadId.length > 0, + "Empty UploadId is not valid"); + } + + /** + * Utility method to validate partHandles. + * @param partHandles handles + * @throws IllegalArgumentException if the parts are invalid + */ + protected void checkPartHandles(Map partHandles) { + checkArgument(!partHandles.isEmpty(), + "Empty upload"); + partHandles.keySet() + .stream() + .forEach(key -> + checkArgument(key > 0, + "Invalid part handle index %s", key)); + } + + /** + * Check all the arguments to the + * {@link MultipartUploader#putPart(UploadHandle, int, Path, InputStream, long)} + * operation. + * @param filePath Target path for upload (as {@link #startUpload(Path)}). + * @param inputStream Data for this part. Implementations MUST close this + * stream after reading in the data. + * @param partNumber Index of the part relative to others. 
+ * @param uploadId Identifier from {@link #startUpload(Path)}. + * @param lengthInBytes Target length to read from the stream. + * @throws IllegalArgumentException invalid argument + */ + protected void checkPutArguments(Path filePath, + InputStream inputStream, + int partNumber, + UploadHandle uploadId, + long lengthInBytes) throws IllegalArgumentException { + checkPath(filePath); + checkArgument(inputStream != null, "null inputStream"); + checkArgument(partNumber > 0, "Invalid part number: %d", partNumber); + checkArgument(uploadId != null, "null uploadId"); + checkArgument(lengthInBytes >= 0, "Invalid part length: %d", lengthInBytes); + } + + /** + * {@inheritDoc}. + * @param path path to abort uploads under. + * @return a future to -1. + * @throws IOException + */ + public CompletableFuture<Integer> abortUploadsUnderPath(Path path) + throws IOException { + checkPath(path); + CompletableFuture<Integer> f = new CompletableFuture<>(); + f.complete(-1); + return f; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java similarity index 51% rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java index b77c244220a9e..7c5a5d949a072 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java @@ -14,24 +14,42 @@ * See the License for the specific language governing permissions and * limitations under the License.
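// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream patch): one way a client
// could drive the asynchronous MultipartUploader contract introduced above,
// where startUpload()/putPart()/complete() each return a CompletableFuture.
// The generic return types (UploadHandle, PartHandle, PathHandle) and the
// Map<Integer, PartHandle> key type are reconstructed from the surrounding
// code; the class and method names below are hypothetical.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;
import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder;
import org.apache.hadoop.fs.impl.FutureIOSupport;

public class MultipartUploadSketch {

  /** Upload two in-memory parts to dest and return the final PathHandle. */
  static PathHandle uploadTwoParts(FileSystem fs, Path dest, byte[] p1, byte[] p2)
      throws IOException {
    MultipartUploader uploader =
        new FileSystemMultipartUploaderBuilder(fs, dest).build();
    try {
      // Every operation is asynchronous; awaitFuture() blocks and rethrows
      // the underlying IOException rather than an ExecutionException.
      UploadHandle upload =
          FutureIOSupport.awaitFuture(uploader.startUpload(dest));
      Map<Integer, PartHandle> parts = new TreeMap<>();
      parts.put(1, FutureIOSupport.awaitFuture(uploader.putPart(
          upload, 1, dest, new ByteArrayInputStream(p1), p1.length)));
      parts.put(2, FutureIOSupport.awaitFuture(uploader.putPart(
          upload, 2, dest, new ByteArrayInputStream(p2), p2.length)));
      return FutureIOSupport.awaitFuture(uploader.complete(upload, dest, parts));
    } finally {
      uploader.close();
    }
  }
}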
*/ -package org.apache.hadoop.fs; + +package org.apache.hadoop.fs.impl; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.commons.compress.utils.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BBPartHandle; +import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSDataOutputStreamBuilder; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InternalOperations; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.UploadHandle; import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.Path.mergePaths; @@ -50,40 +68,82 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class FileSystemMultipartUploader extends MultipartUploader { +public class FileSystemMultipartUploader extends AbstractMultipartUploader { + + private static final Logger LOG = LoggerFactory.getLogger( + FileSystemMultipartUploader.class); private final FileSystem fs; - public FileSystemMultipartUploader(FileSystem fs) { + private final FileSystemMultipartUploaderBuilder builder; + + private final FsPermission permission; + + private final long blockSize; + + private final Options.ChecksumOpt checksumOpt; + + public FileSystemMultipartUploader( + final FileSystemMultipartUploaderBuilder builder, + FileSystem fs) { + super(builder.getPath()); + this.builder = builder; this.fs = fs; + blockSize = builder.getBlockSize(); + checksumOpt = builder.getChecksumOpt(); + permission = builder.getPermission(); } @Override - public UploadHandle initialize(Path filePath) throws IOException { - Path collectorPath = createCollectorPath(filePath); - fs.mkdirs(collectorPath, FsPermission.getDirDefault()); + public CompletableFuture startUpload(Path filePath) + throws IOException { + checkPath(filePath); + return FutureIOSupport.eval(() -> { + Path collectorPath = createCollectorPath(filePath); + fs.mkdirs(collectorPath, FsPermission.getDirDefault()); - ByteBuffer byteBuffer = ByteBuffer.wrap( - collectorPath.toString().getBytes(Charsets.UTF_8)); - return BBUploadHandle.from(byteBuffer); + ByteBuffer byteBuffer = ByteBuffer.wrap( + collectorPath.toString().getBytes(Charsets.UTF_8)); + return BBUploadHandle.from(byteBuffer); + }); } @Override - public PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) + public CompletableFuture putPart(UploadHandle uploadId, + int partNumber, Path filePath, + InputStream inputStream, + long lengthInBytes) throws IOException { checkPutArguments(filePath, inputStream, partNumber, uploadId, lengthInBytes); + return FutureIOSupport.eval(() -> 
innerPutPart(filePath, + inputStream, partNumber, uploadId, lengthInBytes)); + } + + private PartHandle innerPutPart(Path filePath, + InputStream inputStream, + int partNumber, + UploadHandle uploadId, + long lengthInBytes) + throws IOException { byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); Path partPath = mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR), - new Path(Integer.toString(partNumber) + ".part"))); - try(FSDataOutputStream fsDataOutputStream = - fs.createFile(partPath).build()) { - IOUtils.copy(inputStream, fsDataOutputStream, 4096); + new Path(partNumber + ".part"))); + final FSDataOutputStreamBuilder fileBuilder = fs.createFile(partPath); + if (checksumOpt != null) { + fileBuilder.checksumOpt(checksumOpt); + } + if (permission != null) { + fileBuilder.permission(permission); + } + try (FSDataOutputStream fsDataOutputStream = + fileBuilder.blockSize(blockSize).build()) { + IOUtils.copy(inputStream, fsDataOutputStream, + this.builder.getBufferSize()); } finally { cleanupWithLogger(LOG, inputStream); } @@ -106,16 +166,36 @@ private PathHandle getPathHandle(Path filePath) throws IOException { private long totalPartsLen(List<Path> partHandles) throws IOException { long totalLen = 0; - for (Path p: partHandles) { + for (Path p : partHandles) { totalLen += fs.getFileStatus(p).getLen(); } return totalLen; } @Override - @SuppressWarnings("deprecation") // rename w/ OVERWRITE - public PathHandle complete(Path filePath, Map<Integer, PartHandle> handleMap, - UploadHandle multipartUploadId) throws IOException { + public CompletableFuture<PathHandle> complete( + UploadHandle uploadId, + Path filePath, + Map<Integer, PartHandle> handleMap) throws IOException { + + checkPath(filePath); + return FutureIOSupport.eval(() -> + innerComplete(uploadId, filePath, handleMap)); + } + + /** + * The upload complete operation. + * @param multipartUploadId the ID of the upload + * @param filePath path + * @param handleMap map of handles + * @return the path handle + * @throws IOException failure + */ + private PathHandle innerComplete( + UploadHandle multipartUploadId, Path filePath, + Map<Integer, PartHandle> handleMap) throws IOException { + + checkPath(filePath); checkUploadId(multipartUploadId.toByteArray()); @@ -133,6 +213,13 @@ public PathHandle complete(Path filePath, Map<Integer, PartHandle> handleMap, }) .collect(Collectors.toList()); + int count = partHandles.size(); + // built up to identify duplicates -if the size of this set is + // below that of the number of parts, then there's a duplicate entry.
+ Set values = new HashSet<>(count); + values.addAll(partHandles); + Preconditions.checkArgument(values.size() == count, + "Duplicate PartHandles"); byte[] uploadIdByteArray = multipartUploadId.toByteArray(); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); @@ -146,35 +233,30 @@ public PathHandle complete(Path filePath, Map handleMap, fs.create(filePathInsideCollector).close(); fs.concat(filePathInsideCollector, partHandles.toArray(new Path[handles.size()])); - fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE); + new InternalOperations() + .rename(fs, filePathInsideCollector, filePath, + Options.Rename.OVERWRITE); } fs.delete(collectorPath, true); return getPathHandle(filePath); } @Override - public void abort(Path filePath, UploadHandle uploadId) throws IOException { + public CompletableFuture abort(UploadHandle uploadId, + Path filePath) + throws IOException { + checkPath(filePath); byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, Charsets.UTF_8)); - // force a check for a file existing; raises FNFE if not found - fs.getFileStatus(collectorPath); - fs.delete(collectorPath, true); - } - - /** - * Factory for creating MultipartUploaderFactory objects for file:// - * filesystems. - */ - public static class Factory extends MultipartUploaderFactory { - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (fs.getScheme().equals("file")) { - return new FileSystemMultipartUploader(fs); - } + return FutureIOSupport.eval(() -> { + // force a check for a file existing; raises FNFE if not found + fs.getFileStatus(collectorPath); + fs.delete(collectorPath, true); return null; - } + }); } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java new file mode 100644 index 0000000000000..7c4d995c69d1b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploaderBuilder.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Builder for {@link FileSystemMultipartUploader}. 
+ */ +public class FileSystemMultipartUploaderBuilder extends + MultipartUploaderBuilderImpl { + + public FileSystemMultipartUploaderBuilder( + @Nonnull final FileSystem fileSystem, + @Nonnull final Path path) { + super(fileSystem, path); + } + + @Override + public FileSystemMultipartUploaderBuilder getThisBuilder() { + return this; + } + + @Override + public FileSystemMultipartUploader build() + throws IllegalArgumentException, IOException { + return new FileSystemMultipartUploader(this, getFS()); + } + + @Override + public FileSystem getFS() { + return super.getFS(); + } + + @Override + public FsPermission getPermission() { + return super.getPermission(); + } + + @Override + public int getBufferSize() { + return super.getBufferSize(); + } + + @Override + public short getReplication() { + return super.getReplication(); + } + + @Override + public EnumSet getFlags() { + return super.getFlags(); + } + + @Override + public Options.ChecksumOpt getChecksumOpt() { + return super.getChecksumOpt(); + } + + @Override + protected long getBlockSize() { + return super.getBlockSize(); + } + + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java index f5ef8c4923328..8d4bebda15096 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java @@ -20,7 +20,7 @@ import java.io.IOException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java index 7bbb34622647d..551cf9cff3d6f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FunctionsRaisingIOE.java @@ -24,7 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability; /** - * Evolving support for functional programming/lambda-expressions. + * Support for functional programming/lambda-expressions. + * @deprecated use {@code org.apache.hadoop.util.functional} */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -37,6 +38,7 @@ private FunctionsRaisingIOE() { * Function of arity 1 which may raise an IOException. * @param type of arg1 * @param type of return value. + * @deprecated use {@link org.apache.hadoop.util.functional.FunctionRaisingIOE} */ @FunctionalInterface public interface FunctionRaisingIOE { @@ -49,6 +51,7 @@ public interface FunctionRaisingIOE { * @param type of arg1 * @param type of arg2 * @param type of return value. + * @deprecated use {@link org.apache.hadoop.util.functional.BiFunctionRaisingIOE} */ @FunctionalInterface public interface BiFunctionRaisingIOE { @@ -59,6 +62,7 @@ public interface BiFunctionRaisingIOE { /** * This is a callable which only raises an IOException. 
* @param return type + * @deprecated use {@link org.apache.hadoop.util.functional.CallableRaisingIOE} */ @FunctionalInterface public interface CallableRaisingIOE { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java index 26856e5b935e0..fe112d59352f5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -31,9 +32,16 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSBuilder; +import org.apache.hadoop.util.functional.CallableRaisingIOE; +import org.apache.hadoop.util.functional.FutureIO; /** * Support for future IO and the FS Builder subclasses. + * If methods in here are needed for applications, promote + * to {@link FutureIO} for public use -with the original + * method relaying to it. This is to ensure that external + * filesystem implementations can safely use these methods + * without linkage problems surfacing. */ @InterfaceAudience.Private @InterfaceStability.Unstable @@ -52,16 +60,9 @@ private FutureIOSupport() { * @throws IOException if something went wrong * @throws RuntimeException any nested RTE thrown */ - public static T awaitFuture(final Future future) + public static T awaitFuture(final Future future) throws InterruptedIOException, IOException, RuntimeException { - try { - return future.get(); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException(e.toString()) - .initCause(e); - } catch (ExecutionException e) { - return raiseInnerCause(e); - } + return FutureIO.awaitFuture(future); } @@ -81,18 +82,9 @@ public static T awaitFuture(final Future future, final TimeUnit unit) throws InterruptedIOException, IOException, RuntimeException, TimeoutException { - - try { - return future.get(timeout, unit); - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException(e.toString()) - .initCause(e); - } catch (ExecutionException e) { - return raiseInnerCause(e); - } + return FutureIO.awaitFuture(future, timeout, unit); } - /** * From the inner cause of an execution exception, extract the inner cause * if it is an IOE or RTE. @@ -109,7 +101,7 @@ public static T awaitFuture(final Future future, */ public static T raiseInnerCause(final ExecutionException e) throws IOException { - throw unwrapInnerException(e); + return FutureIO.raiseInnerCause(e); } /** @@ -124,41 +116,7 @@ public static T raiseInnerCause(final ExecutionException e) */ public static T raiseInnerCause(final CompletionException e) throws IOException { - throw unwrapInnerException(e); - } - - /** - * From the inner cause of an execution exception, extract the inner cause. - * If it is an RTE: throw immediately. - * If it is an IOE: Return. - * If it is a WrappedIOException: Unwrap and return - * Else: create a new IOException. - * - * Recursively handles wrapped Execution and Completion Exceptions in - * case something very complicated has happened. 
- * @param e exception. - * @return an IOException extracted or built from the cause. - * @throws RuntimeException if that is the inner cause. - */ - private static IOException unwrapInnerException(final Throwable e) { - Throwable cause = e.getCause(); - if (cause instanceof IOException) { - return (IOException) cause; - } else if (cause instanceof WrappedIOException) { - return ((WrappedIOException) cause).getCause(); - } else if (cause instanceof CompletionException) { - return unwrapInnerException(cause); - } else if (cause instanceof ExecutionException) { - return unwrapInnerException(cause); - } else if (cause instanceof RuntimeException) { - throw (RuntimeException) cause; - } else if (cause != null) { - // other type: wrap with a new IOE - return new IOException(cause); - } else { - // this only happens if there was no cause. - return new IOException(e); - } + return FutureIO.raiseInnerCause(e); } /** @@ -224,4 +182,29 @@ public static void propagateOptions( } } } + + /** + * Evaluate a CallableRaisingIOE in the current thread, + * converting IOEs to RTEs and propagating. + * @param callable callable to invoke + * @param Return type. + * @return the evaluated result. + * @throws UnsupportedOperationException fail fast if unsupported + * @throws IllegalArgumentException invalid argument + */ + public static CompletableFuture eval( + CallableRaisingIOE callable) { + CompletableFuture result = new CompletableFuture<>(); + try { + result.complete(callable.apply()); + } catch (UnsupportedOperationException | IllegalArgumentException tx) { + // fail fast here + throw tx; + } catch (Throwable tx) { + // fail lazily here to ensure callers expect all File IO operations to + // surface later + result.completeExceptionally(tx); + } + return result; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java new file mode 100644 index 0000000000000..88c573acc4cb3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
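// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream patch): the eval()/awaitFuture()
// pattern used throughout these uploader changes. eval() runs a
// CallableRaisingIOE immediately and captures any IOException in the returned
// CompletableFuture; awaitFuture() later unwraps it as the original
// IOException. The wrapper class and method names here are hypothetical.
import java.io.IOException;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.impl.FutureIOSupport;

public class EvalSketch {

  /** Wrap a blocking getFileStatus() call in a future, as the patch does. */
  static CompletableFuture<FileStatus> statusOf(FileSystem fs, Path path) {
    return FutureIOSupport.eval(() -> fs.getFileStatus(path));
  }

  /** Callers unwrap with awaitFuture() and see a plain IOException on failure. */
  static long lengthOf(FileSystem fs, Path path) throws IOException {
    return FutureIOSupport.awaitFuture(statusOf(fs, path)).getLen();
  }
}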
+ */ + +package org.apache.hadoop.fs.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.MultipartUploader; +import org.apache.hadoop.fs.MultipartUploaderBuilder; +import org.apache.hadoop.fs.Options.ChecksumOpt; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; + +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; + +/** + * Builder for {@link MultipartUploader} implementations. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class MultipartUploaderBuilderImpl + > + extends AbstractFSBuilderImpl + implements MultipartUploaderBuilder { + + private final FileSystem fs; + + private FsPermission permission; + + private int bufferSize; + + private short replication; + + private long blockSize; + + private final EnumSet flags = EnumSet.noneOf(CreateFlag.class); + + private ChecksumOpt checksumOpt; + + /** + * Return the concrete implementation of the builder instance. + */ + public abstract B getThisBuilder(); + + /** + * Construct from a {@link FileContext}. + * + * @param fc FileContext + * @param p path. + * @throws IOException failure + */ + protected MultipartUploaderBuilderImpl(@Nonnull FileContext fc, + @Nonnull Path p) throws IOException { + super(checkNotNull(p)); + checkNotNull(fc); + this.fs = null; + + FsServerDefaults defaults = fc.getServerDefaults(p); + bufferSize = defaults.getFileBufferSize(); + replication = defaults.getReplication(); + blockSize = defaults.getBlockSize(); + } + + /** + * Constructor. + */ + protected MultipartUploaderBuilderImpl(@Nonnull FileSystem fileSystem, + @Nonnull Path p) { + super(fileSystem.makeQualified(checkNotNull(p))); + checkNotNull(fileSystem); + fs = fileSystem; + bufferSize = fs.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, + IO_FILE_BUFFER_SIZE_DEFAULT); + replication = fs.getDefaultReplication(p); + blockSize = fs.getDefaultBlockSize(p); + } + + protected FileSystem getFS() { + checkNotNull(fs); + return fs; + } + + protected FsPermission getPermission() { + if (permission == null) { + permission = FsPermission.getFileDefault(); + } + return permission; + } + + /** + * Set permission for the file. + */ + @Override + public B permission(@Nonnull final FsPermission perm) { + checkNotNull(perm); + permission = perm; + return getThisBuilder(); + } + + protected int getBufferSize() { + return bufferSize; + } + + /** + * Set the size of the buffer to be used. + */ + @Override + public B bufferSize(int bufSize) { + bufferSize = bufSize; + return getThisBuilder(); + } + + protected short getReplication() { + return replication; + } + + /** + * Set replication factor. + */ + @Override + public B replication(short replica) { + replication = replica; + return getThisBuilder(); + } + + protected long getBlockSize() { + return blockSize; + } + + /** + * Set block size. 
+ */ + @Override + public B blockSize(long blkSize) { + blockSize = blkSize; + return getThisBuilder(); + } + + protected EnumSet getFlags() { + return flags; + } + + /** + * Create an FSDataOutputStream at the specified path. + */ + @Override + public B create() { + flags.add(CreateFlag.CREATE); + return getThisBuilder(); + } + + /** + * Set to true to overwrite the existing file. + * Set it to false, an exception will be thrown when calling {@link #build()} + * if the file exists. + */ + @Override + public B overwrite(boolean overwrite) { + if (overwrite) { + flags.add(CreateFlag.OVERWRITE); + } else { + flags.remove(CreateFlag.OVERWRITE); + } + return getThisBuilder(); + } + + /** + * Append to an existing file (optional operation). + */ + @Override + public B append() { + flags.add(CreateFlag.APPEND); + return getThisBuilder(); + } + + protected ChecksumOpt getChecksumOpt() { + return checksumOpt; + } + + /** + * Set checksum opt. + */ + @Override + public B checksumOpt(@Nonnull final ChecksumOpt chksumOpt) { + checkNotNull(chksumOpt); + checksumOpt = chksumOpt; + return getThisBuilder(); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java index 9332ac6e7eedb..1e3e43581dccc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathCapabilities; -import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/StoreImplementationUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/StoreImplementationUtils.java new file mode 100644 index 0000000000000..605a3538d8b6b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/StoreImplementationUtils.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License + */ + +package org.apache.hadoop.fs.impl; + +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.StreamCapabilities; + +import static org.apache.hadoop.fs.StreamCapabilities.HFLUSH; +import static org.apache.hadoop.fs.StreamCapabilities.HSYNC; + +/** + * Utility classes to help implementing filesystems and streams. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class StoreImplementationUtils { + + private StoreImplementationUtils() { + } + + /** + * Check the probe capability being for {@link StreamCapabilities#HSYNC} + * or {@link StreamCapabilities#HFLUSH} + * {@code Syncable.hsync()} and {@code Syncable.hflush()} functionality. + * @param capability capability string. + * @return true if either refers to one of the Syncable operations. + */ + public static boolean isProbeForSyncable(String capability) { + return capability.equalsIgnoreCase(HSYNC) || + capability.equalsIgnoreCase(HFLUSH); + } + + /** + * Probe for an object having a capability; returns true + * if the stream implements {@link StreamCapabilities} and its + * {@code hasCapabilities()} method returns true for the capability. + * This is a package private method intended to provided a common + * implementation for input and output streams. + * {@link StreamCapabilities#hasCapability(String)} call is for public use. + * @param object object to probe. + * @param capability capability to probe for + * @return true if the object implements stream capabilities and + * declares that it supports the capability. + */ + static boolean objectHasCapability(Object object, String capability) { + if (object instanceof StreamCapabilities) { + return ((StreamCapabilities) object).hasCapability(capability); + } + return false; + } + + /** + * Probe for an output stream having a capability; returns true + * if the stream implements {@link StreamCapabilities} and its + * {@code hasCapabilities()} method returns true for the capability. + * @param out output stream + * @param capability capability to probe for + * @return true if the stream declares that it supports the capability. + */ + public static boolean hasCapability(OutputStream out, String capability) { + return objectHasCapability(out, capability); + } + + /** + * Probe for an input stream having a capability; returns true + * if the stream implements {@link StreamCapabilities} and its + * {@code hasCapabilities()} method returns true for the capability. + * @param in input stream + * @param capability capability to probe for + * @return true if the stream declares that it supports the capability. 
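// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream patch): how a stream wrapper
// might use the probe helpers above, only calling hsync() when the wrapped
// stream advertises the HSYNC capability. The helper class and method name
// are hypothetical.
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.impl.StoreImplementationUtils;

public class SyncIfSupported {

  /** hsync() when the stream supports it, otherwise fall back to flush(). */
  static void persist(OutputStream out) throws IOException {
    if (StoreImplementationUtils.hasCapability(out, StreamCapabilities.HSYNC)
        && out instanceof Syncable) {
      ((Syncable) out).hsync();
    } else {
      out.flush();
    }
  }
}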
+ */ + public static boolean hasCapability(InputStream in, String capability) { + return objectHasCapability(in, capability); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java index 1de1ecb785368..2fcdee915ede9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java @@ -19,9 +19,10 @@ package org.apache.hadoop.fs.impl; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.concurrent.ExecutionException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -33,10 +34,12 @@ * * The constructor signature guarantees the cause will be an IOException, * and as it checks for a null-argument, non-null. + * @deprecated use the {@code UncheckedIOException}. */ +@Deprecated @InterfaceAudience.Private @InterfaceStability.Unstable -public class WrappedIOException extends RuntimeException { +public class WrappedIOException extends UncheckedIOException { private static final long serialVersionUID = 2510210974235779294L; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java index a902488377239..04a045299b7ea 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java @@ -21,7 +21,7 @@ import java.util.Collection; import java.util.List; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java index 385fed21d4194..674b88083d3dc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java @@ -22,9 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Objects; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * An AclStatus contains the ACL information of a specific file. 
AclStatus diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java index 42492520dceaa..58b24f200429b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * AclUtil contains utility methods for manipulating ACLs. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java index ed33357b51d2b..297ec048e50c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java @@ -19,12 +19,12 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URLDecoder; import java.util.ArrayList; import java.util.Vector; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.jcraft.jsch.ChannelSftp; import com.jcraft.jsch.ChannelSftp.LsEntry; import com.jcraft.jsch.SftpATTRS; @@ -51,6 +51,7 @@ public class SFTPFileSystem extends FileSystem { private SFTPConnectionPool connectionPool; private URI uri; + private final AtomicBoolean closed = new AtomicBoolean(false); private static final int DEFAULT_SFTP_PORT = 22; private static final int DEFAULT_MAX_CONNECTION = 5; @@ -84,6 +85,7 @@ public class SFTPFileSystem extends FileSystem { "Destination path %s already exist, cannot rename!"; public static final String E_FAILED_GETHOME = "Failed to get home directory"; public static final String E_FAILED_DISCONNECT = "Failed to disconnect"; + public static final String E_FS_CLOSED = "FileSystem is closed!"; /** * Set configuration from UI. 
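// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream patch): the SFTPFileSystem
// hunks above and below rely on a standard idempotent-close idiom built on
// AtomicBoolean. A minimal, self-contained version of that idiom, using a
// hypothetical resource class:
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class IdempotentResource implements Closeable {

  private final AtomicBoolean closed = new AtomicBoolean(false);

  /** Reject use after close, mirroring checkNotClosed() in the patch. */
  private void checkNotClosed() throws IOException {
    if (closed.get()) {
      throw new IOException("resource is closed");
    }
  }

  public void doWork() throws IOException {
    checkNotClosed();
    // ... perform the real work against the underlying connection ...
  }

  @Override
  public void close() throws IOException {
    // getAndSet() turns a second or concurrent close() into a no-op.
    if (closed.getAndSet(true)) {
      return;
    }
    // ... release the underlying connections exactly once ...
  }
}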
@@ -139,8 +141,9 @@ private void setConfigurationFromURI(URI uriInfo, Configuration conf) * @throws IOException */ private ChannelSftp connect() throws IOException { - Configuration conf = getConf(); + checkNotClosed(); + Configuration conf = getConf(); String host = conf.get(FS_SFTP_HOST, null); int port = conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT); String user = conf.get(FS_SFTP_USER_PREFIX + host, null); @@ -516,20 +519,21 @@ public FSDataInputStream open(Path f, int bufferSize) throws IOException { disconnect(channel); throw new IOException(String.format(E_PATH_DIR, f)); } - InputStream is; try { // the path could be a symbolic link, so get the real path absolute = new Path("/", channel.realpath(absolute.toUri().getPath())); - - is = channel.get(absolute.toUri().getPath()); } catch (SftpException e) { throw new IOException(e); } - return new FSDataInputStream(new SFTPInputStream(is, statistics)){ + return new FSDataInputStream( + new SFTPInputStream(channel, absolute, statistics)){ @Override public void close() throws IOException { - super.close(); - disconnect(channel); + try { + super.close(); + } finally { + disconnect(channel); + } } }; } @@ -703,6 +707,31 @@ public FileStatus getFileStatus(Path f) throws IOException { } } + @Override + public void close() throws IOException { + if (closed.getAndSet(true)) { + return; + } + try { + super.close(); + } finally { + if (connectionPool != null) { + connectionPool.shutdown(); + } + } + } + + /** + * Verify that the input stream is open. Non blocking; this gives + * the last state of the volatile {@link #closed} field. + * @throws IOException if the connection is closed. + */ + private void checkNotClosed() throws IOException { + if (closed.get()) { + throw new IOException(uri + ": " + E_FS_CLOSED); + } + } + @VisibleForTesting SFTPConnectionPool getConnectionPool() { return connectionPool; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java index 7af299bd113e1..d0f9a8d0887ca 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java @@ -15,62 +15,107 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.fs.sftp; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import com.jcraft.jsch.ChannelSftp; +import com.jcraft.jsch.SftpATTRS; +import com.jcraft.jsch.SftpException; + +import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; /** SFTP FileSystem input stream. 
*/ class SFTPInputStream extends FSInputStream { - public static final String E_SEEK_NOTSUPPORTED = "Seek not supported"; - public static final String E_NULL_INPUTSTREAM = "Null InputStream"; - public static final String E_STREAM_CLOSED = "Stream closed"; - + private final ChannelSftp channel; + private final Path path; private InputStream wrappedStream; private FileSystem.Statistics stats; private boolean closed; private long pos; + private long nextPos; + private long contentLength; - SFTPInputStream(InputStream stream, FileSystem.Statistics stats) { - - if (stream == null) { - throw new IllegalArgumentException(E_NULL_INPUTSTREAM); + SFTPInputStream(ChannelSftp channel, Path path, FileSystem.Statistics stats) + throws IOException { + try { + this.channel = channel; + this.path = path; + this.stats = stats; + this.wrappedStream = channel.get(path.toUri().getPath()); + SftpATTRS stat = channel.lstat(path.toString()); + this.contentLength = stat.getSize(); + } catch (SftpException e) { + throw new IOException(e); } - this.wrappedStream = stream; - this.stats = stats; + } - this.pos = 0; - this.closed = false; + @Override + public synchronized void seek(long position) throws IOException { + checkNotClosed(); + if (position < 0) { + throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK); + } + nextPos = position; } @Override - public void seek(long position) throws IOException { - throw new IOException(E_SEEK_NOTSUPPORTED); + public synchronized int available() throws IOException { + checkNotClosed(); + long remaining = contentLength - nextPos; + if (remaining > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } + return (int) remaining; + } + + private void seekInternal() throws IOException { + if (pos == nextPos) { + return; + } + if (nextPos > pos) { + long skipped = wrappedStream.skip(nextPos - pos); + pos = pos + skipped; + } + if (nextPos < pos) { + wrappedStream.close(); + try { + wrappedStream = channel.get(path.toUri().getPath()); + pos = wrappedStream.skip(nextPos); + } catch (SftpException e) { + throw new IOException(e); + } + } } @Override public boolean seekToNewSource(long targetPos) throws IOException { - throw new IOException(E_SEEK_NOTSUPPORTED); + return false; } @Override - public long getPos() throws IOException { - return pos; + public synchronized long getPos() throws IOException { + return nextPos; } @Override public synchronized int read() throws IOException { - if (closed) { - throw new IOException(E_STREAM_CLOSED); + checkNotClosed(); + if (this.contentLength == 0 || (nextPos >= contentLength)) { + return -1; } - + seekInternal(); int byteRead = wrappedStream.read(); if (byteRead >= 0) { pos++; + nextPos++; } if (stats != null & byteRead >= 0) { stats.incrementBytesRead(1); @@ -78,23 +123,6 @@ public synchronized int read() throws IOException { return byteRead; } - public synchronized int read(byte[] buf, int off, int len) - throws IOException { - if (closed) { - throw new IOException(E_STREAM_CLOSED); - } - - int result = wrappedStream.read(buf, off, len); - if (result > 0) { - pos += result; - } - if (stats != null & result > 0) { - stats.incrementBytesRead(result); - } - - return result; - } - public synchronized void close() throws IOException { if (closed) { return; @@ -103,4 +131,12 @@ public synchronized void close() throws IOException { wrappedStream.close(); closed = true; } + + private void checkNotClosed() throws IOException { + if (closed) { + throw new IOException( + path.toUri() + ": " + FSExceptionMessages.STREAM_IS_CLOSED + ); + } + } } diff 
--git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java index 5a60ef2ae9b03..dcff0094eccf5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java @@ -22,7 +22,7 @@ import java.util.LinkedList; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java index ca9961aeb65a0..90a709dffc0c1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java @@ -54,6 +54,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST; +import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; /** * Provides: argument processing to ensure the destination is valid @@ -515,7 +516,8 @@ FSDataOutputStream create(PathData item, boolean lazyPersist) defaultBlockSize = getDefaultBlockSize(item.path); } - EnumSet createFlags = EnumSet.of(CREATE, LAZY_PERSIST); + EnumSet createFlags = + EnumSet.of(CREATE, LAZY_PERSIST, OVERWRITE); return create(item.path, FsPermission.getFileDefault().applyUMask( FsPermission.getUMask(getConf())), diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Concat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Concat.java new file mode 100644 index 0000000000000..f25b689e7ed17 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Concat.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.shell; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.LinkedList; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIOException; + +/** + * Concat the given files. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class Concat extends FsCommand { + public static void registerCommands(CommandFactory factory) { + factory.addClass(Concat.class, "-concat"); + } + + public static final String NAME = "concat"; + public static final String USAGE = " ..."; + public static final String DESCRIPTION = "Concatenate existing source files" + + " into the target file. Target file and source files should be in the" + + " same directory."; + private static FileSystem testFs; // test only. + + @Override + protected void processArguments(LinkedList args) + throws IOException { + if (args.size() < 1) { + throw new IOException("Target path not specified. " + USAGE); + } + if (args.size() < 3) { + throw new IOException( + "The number of source paths is less than 2. " + USAGE); + } + PathData target = args.removeFirst(); + LinkedList srcList = args; + if (!target.exists || !target.stat.isFile()) { + throw new FileNotFoundException(String + .format("Target path %s does not exist or is" + " not file.", + target.path)); + } + Path[] srcArray = new Path[srcList.size()]; + for (int i = 0; i < args.size(); i++) { + PathData src = srcList.get(i); + if (!src.exists || !src.stat.isFile()) { + throw new FileNotFoundException( + String.format("%s does not exist or is not file.", src.path)); + } + srcArray[i] = src.path; + } + FileSystem fs = target.fs; + if (testFs != null) { + fs = testFs; + } + try { + fs.concat(target.path, srcArray); + } catch (UnsupportedOperationException exception) { + throw new PathIOException("Dest filesystem '" + fs.getUri().getScheme() + + "' doesn't support concat.", exception); + } + } + + @VisibleForTesting + static void setTestFs(FileSystem fs) { + testFs = fs; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java index 4622c75fbd410..77f63170593ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java @@ -30,7 +30,7 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSDataInputStream; @@ -239,26 +239,35 @@ protected void processOptions(LinkedList args) * Copy local files to a remote filesystem */ public static class Put extends CommandWithDestination { + private ThreadPoolExecutor executor = null; + private int numThreads = 1; + + private static final int MAX_THREADS = + Runtime.getRuntime().availableProcessors() * 2; + public static final String NAME = "put"; public static final String USAGE = - "[-f] 
[-p] [-l] [-d] ... "; + "[-f] [-p] [-l] [-d] [-t ] ... "; public static final String DESCRIPTION = - "Copy files from the local file system " + - "into fs. Copying fails if the file already " + - "exists, unless the -f flag is given.\n" + - "Flags:\n" + - " -p : Preserves access and modification times, ownership and the mode.\n" + - " -f : Overwrites the destination if it already exists.\n" + - " -l : Allow DataNode to lazily persist the file to disk. Forces\n" + - " replication factor of 1. This flag will result in reduced\n" + - " durability. Use with care.\n" + + "Copy files from the local file system " + + "into fs. Copying fails if the file already " + + "exists, unless the -f flag is given.\n" + + "Flags:\n" + + " -p : Preserves timestamps, ownership and the mode.\n" + + " -f : Overwrites the destination if it already exists.\n" + + " -t : Number of threads to be used, default is 1.\n" + + " -l : Allow DataNode to lazily persist the file to disk. Forces" + + " replication factor of 1. This flag will result in reduced" + + " durability. Use with care.\n" + " -d : Skip creation of temporary file(._COPYING_).\n"; @Override protected void processOptions(LinkedList args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d"); + cf.addOptionWithValue("t"); cf.parse(args); + setNumberThreads(cf.getOptValue("t")); setOverwrite(cf.getOpt("f")); setPreserve(cf.getOpt("p")); setLazyPersist(cf.getOpt("l")); @@ -288,32 +297,22 @@ protected void processArguments(LinkedList args) copyStreamToTarget(System.in, getTargetPath(args.get(0))); return; } - super.processArguments(args); - } - } - public static class CopyFromLocal extends Put { - private ThreadPoolExecutor executor = null; - private int numThreads = 1; + executor = new ThreadPoolExecutor(numThreads, numThreads, 1, + TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024), + new ThreadPoolExecutor.CallerRunsPolicy()); + super.processArguments(args); - private static final int MAX_THREADS = - Runtime.getRuntime().availableProcessors() * 2; - public static final String NAME = "copyFromLocal"; - public static final String USAGE = - "[-f] [-p] [-l] [-d] [-t ] ... "; - public static final String DESCRIPTION = - "Copy files from the local file system " + - "into fs. Copying fails if the file already " + - "exists, unless the -f flag is given.\n" + - "Flags:\n" + - " -p : Preserves access and modification times, ownership and the" + - " mode.\n" + - " -f : Overwrites the destination if it already exists.\n" + - " -t : Number of threads to be used, default is 1.\n" + - " -l : Allow DataNode to lazily persist the file to disk. Forces" + - " replication factor of 1. This flag will result in reduced" + - " durability. 
Use with care.\n" + - " -d : Skip creation of temporary file(._COPYING_).\n"; + // issue the command and then wait for it to finish + executor.shutdown(); + try { + executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES); + } catch (InterruptedException e) { + executor.shutdownNow(); + displayError(e); + Thread.currentThread().interrupt(); + } + } private void setNumberThreads(String numberThreadsString) { if (numberThreadsString == null) { @@ -330,22 +329,6 @@ private void setNumberThreads(String numberThreadsString) { } } - @Override - protected void processOptions(LinkedList args) throws IOException { - CommandFormat cf = - new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d"); - cf.addOptionWithValue("t"); - cf.parse(args); - setNumberThreads(cf.getOptValue("t")); - setOverwrite(cf.getOpt("f")); - setPreserve(cf.getOpt("p")); - setLazyPersist(cf.getOpt("l")); - setDirectWrite(cf.getOpt("d")); - getRemoteDestination(args); - // should have a -r option - setRecursive(true); - } - private void copyFile(PathData src, PathData target) throws IOException { if (isPathRecursable(src)) { throw new PathIsDirectoryException(src.toString()); @@ -372,25 +355,6 @@ protected void copyFileToTarget(PathData src, PathData target) executor.submit(task); } - @Override - protected void processArguments(LinkedList args) - throws IOException { - executor = new ThreadPoolExecutor(numThreads, numThreads, 1, - TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024), - new ThreadPoolExecutor.CallerRunsPolicy()); - super.processArguments(args); - - // issue the command and then wait for it to finish - executor.shutdown(); - try { - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES); - } catch (InterruptedException e) { - executor.shutdownNow(); - displayError(e); - Thread.currentThread().interrupt(); - } - } - @VisibleForTesting public int getNumThreads() { return numThreads; @@ -401,6 +365,12 @@ public ThreadPoolExecutor getExecutor() { return executor; } } + + public static class CopyFromLocal extends Put { + public static final String NAME = "copyFromLocal"; + public static final String USAGE = Put.USAGE; + public static final String DESCRIPTION = "Identical to the -put command."; + } public static class CopyToLocal extends Get { public static final String NAME = "copyToLocal"; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java index 784bbf33f7826..9cafbb0f151a9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java @@ -70,6 +70,7 @@ public static void registerCommands(CommandFactory factory) { factory.registerCommands(Truncate.class); factory.registerCommands(SnapshotCommands.class); factory.registerCommands(XAttrCommands.class); + factory.registerCommands(Concat.class); } protected FsCommand() {} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java index 6596527738058..64aade3df9539 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java @@ -128,7 +128,8 @@ private void addToUsagesTable(URI uri, FsStatus fsStatus, @Override 
protected void processPath(PathData item) throws IOException { - if (ViewFileSystemUtil.isViewFileSystem(item.fs)) { + if (ViewFileSystemUtil.isViewFileSystem(item.fs) + || ViewFileSystemUtil.isViewFileSystemOverloadScheme(item.fs)) { ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs; Map fsStatusMap = ViewFileSystemUtil.getStatus(viewFileSystem, item.path); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java index efc541ccf81ee..b50eb69a26d70 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java @@ -25,7 +25,7 @@ import java.util.Date; import java.util.LinkedList; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java index 5ef42775ea58b..c20293e1a5adb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathExistsException; -import org.apache.hadoop.fs.shell.CopyCommands.Put; +import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal; /** Various commands for moving files */ @InterfaceAudience.Private @@ -41,12 +41,22 @@ public static void registerCommands(CommandFactory factory) { /** * Move local files to a remote filesystem */ - public static class MoveFromLocal extends Put { + public static class MoveFromLocal extends CopyFromLocal { public static final String NAME = "moveFromLocal"; - public static final String USAGE = " ... "; + public static final String USAGE = + "[-f] [-p] [-l] [-d] ... 
"; public static final String DESCRIPTION = - "Same as -put, except that the source is " + - "deleted after it's copied."; + "Same as -put, except that the source is " + + "deleted after it's copied\n" + + "and -t option has not yet implemented."; + + @Override + protected void processOptions(LinkedList args) throws IOException { + if(args.contains("-t")) { + throw new CommandFormat.UnknownOptionException("-t"); + } + super.processOptions(args); + } @Override protected void processPath(PathData src, PathData target) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java index 4bd596a40d678..75dc86ec87c18 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Snapshot related operations diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java index 8a75a60f435ca..22dd32bce8512 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.io.IOUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Get a listing of all files in that match the file patterns. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java index be174b5e9cf68..872de306d287a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java @@ -31,7 +31,7 @@ import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.util.StringUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Unix touch like commands @@ -102,8 +102,8 @@ public static class Touch extends TouchCommands { public static final String NAME = "touch"; public static final String USAGE = "[-" + OPTION_CHANGE_ONLY_ACCESS_TIME + "] [-" + OPTION_CHANGE_ONLY_MODIFICATION_TIME + "] [-" - + OPTION_USE_TIMESTAMP + " TIMESTAMP ] [-" + OPTION_DO_NOT_CREATE_FILE - + "] ..."; + + OPTION_USE_TIMESTAMP + " TIMESTAMP (yyyyMMdd:HHmmss) ] " + + "[-" + OPTION_DO_NOT_CREATE_FILE + "] ..."; public static final String DESCRIPTION = "Updates the access and modification times of the file specified by the" + " to the current time. 
If the file does not exist, then a zero" @@ -114,7 +114,8 @@ public static class Touch extends TouchCommands { + OPTION_CHANGE_ONLY_MODIFICATION_TIME + " Change only the modification time \n" + "-" + OPTION_USE_TIMESTAMP + " TIMESTAMP" - + " Use specified timestamp (in format yyyyMMddHHmmss) instead of current time \n" + + " Use specified timestamp instead of current time\n" + + " TIMESTAMP format yyyyMMdd:HHmmss\n" + "-" + OPTION_DO_NOT_CREATE_FILE + " Do not create any files"; private boolean changeModTime = false; @@ -137,7 +138,7 @@ protected void processOptions(LinkedList args) { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_USE_TIMESTAMP, OPTION_CHANGE_ONLY_ACCESS_TIME, - OPTION_CHANGE_ONLY_MODIFICATION_TIME); + OPTION_CHANGE_ONLY_MODIFICATION_TIME, OPTION_DO_NOT_CREATE_FILE); cf.parse(args); this.changeModTime = cf.getOpt(OPTION_CHANGE_ONLY_MODIFICATION_TIME); this.changeAccessTime = cf.getOpt(OPTION_CHANGE_ONLY_ACCESS_TIME); @@ -183,7 +184,8 @@ private void updateTime(PathData item) throws IOException { time = dateFormat.parse(timestamp).getTime(); } catch (ParseException e) { throw new IllegalArgumentException( - "Unable to parse the specified timestamp " + timestamp, e); + "Unable to parse the specified timestamp "+ timestamp + + ". The expected format is " + dateFormat.toPattern(), e); } } if (changeModTime ^ changeAccessTime) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java index 630177611940e..2fe7c858e4e66 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.Map.Entry; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsInputStream.java new file mode 100644 index 0000000000000..bdc432570542b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsInputStream.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.statistics; + +import java.io.BufferedInputStream; +import java.io.InputStream; + +import org.apache.hadoop.fs.StreamCapabilities; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; + +/** + * An extension of {@code BufferedInputStream} which implements + * {@link IOStatisticsSource} and forwards requests for the + * {@link IOStatistics} to the wrapped stream. + * + * This should be used when any input stream needs buffering while + * allowing the inner stream to be a source of statistics. + * + * It also implements {@link StreamCapabilities} and forwards the probe + * to the inner stream, if possible. + */ +public class BufferedIOStatisticsInputStream + extends BufferedInputStream + implements IOStatisticsSource, StreamCapabilities { + + /** + * Buffer an input stream with the default buffer size of 8k. + * @param in input stream + */ + public BufferedIOStatisticsInputStream(final InputStream in) { + super(in); + } + + /** + * Buffer an input stream with the chosen buffer size. + * @param in input stream + * @param size buffer size + */ + public BufferedIOStatisticsInputStream(final InputStream in, final int size) { + super(in, size); + } + + /** + * Return any IOStatistics offered by the inner stream. + * @return inner IOStatistics or null + */ + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(in); + } + + /** + * If the inner stream supports {@link StreamCapabilities}, + * forward the probe to it. + * Otherwise: return false. + * + * @param capability string to query the stream support for. + * @return true if a capability is known to be supported. + */ + @Override + public boolean hasCapability(final String capability) { + if (in instanceof StreamCapabilities) { + return ((StreamCapabilities) in).hasCapability(capability); + } else { + return false; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsOutputStream.java new file mode 100644 index 0000000000000..88e73a0629b1d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/BufferedIOStatisticsOutputStream.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.statistics; + +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +import org.apache.hadoop.fs.StreamCapabilities; +import org.apache.hadoop.fs.Syncable; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; + +/** + * An extension of {@code BufferedOutputStream} which implements + * {@link IOStatisticsSource} and forwards requests for the + * {@link IOStatistics} to the wrapped stream. + * + * This should be used when any output stream needs buffering while + * allowing the inner stream to be a source of statistics. + * + * It also implements {@link StreamCapabilities} + * and {@link Syncable} and forwards to to the inner stream, + * if possible. + */ +public class BufferedIOStatisticsOutputStream + extends BufferedOutputStream + implements IOStatisticsSource, Syncable, StreamCapabilities { + + /** + * Should calls to Syncable downgrade to flush if the underlying + * stream does not support it? + * While that breaks a core contract requirement of Syncable: + * "Sync.sync() guarantees durability", downgrading is + * the default behavior of FsDataOutputStream. + */ + private final boolean downgradeSyncable; + + /** + * Construct with default buffer size. + * @param out output stream to buffer + * @param downgradeSyncable should Syncable calls downgrade? + */ + public BufferedIOStatisticsOutputStream( + final OutputStream out, + final boolean downgradeSyncable) { + super(out); + this.downgradeSyncable = downgradeSyncable; + } + + /** + * Construct with custom buffer size. + * + * @param out output stream to buffer + * @param size buffer. + * @param downgradeSyncable should Syncable calls downgrade? + */ + public BufferedIOStatisticsOutputStream( + final OutputStream out, + final int size, + final boolean downgradeSyncable) { + super(out, size); + this.downgradeSyncable = downgradeSyncable; + } + + /** + * Ask the inner stream for their IOStatistics. + * @return any IOStatistics offered by the inner stream. + */ + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(out); + } + + /** + * If the inner stream supports {@link StreamCapabilities}, + * forward the probe to it. + * Otherwise: return false. + * + * @param capability string to query the stream support for. + * @return true if a capability is known to be supported. + */ + @Override + public boolean hasCapability(final String capability) { + if (out instanceof StreamCapabilities) { + return ((StreamCapabilities) out).hasCapability(capability); + } else { + return false; + } + } + + /** + * If the inner stream is Syncable, flush the buffer and then + * invoke the inner stream's hflush() operation. + * + * Otherwise: throw an exception, unless the stream was constructed with + * {@link #downgradeSyncable} set to true, in which case the stream + * is just flushed. + * @throws IOException IO Problem + * @throws UnsupportedOperationException if the inner class is not syncable + */ + @Override + public void hflush() throws IOException { + if (out instanceof Syncable) { + flush(); + ((Syncable) out).hflush(); + } else { + if (!downgradeSyncable) { + throw new UnsupportedOperationException("hflush not supported by " + + out); + } else { + flush(); + } + } + } + + /** + * If the inner stream is Syncable, flush the buffer and then + * invoke the inner stream's hsync() operation. 
+ * + * Otherwise: throw an exception, unless the stream was constructed with + * {@link #downgradeSyncable} set to true, in which case the stream + * is just flushed. + * @throws IOException IO Problem + * @throws UnsupportedOperationException if the inner class is not syncable + */ + @Override + public void hsync() throws IOException { + if (out instanceof Syncable) { + flush(); + ((Syncable) out).hsync(); + } else { + if (!downgradeSyncable) { + throw new UnsupportedOperationException("hsync not supported by " + + out); + } else { + flush(); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationStatisticSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationStatisticSummary.java new file mode 100644 index 0000000000000..e1335d77d792a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationStatisticSummary.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import javax.annotation.Nullable; +import java.io.Serializable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_FAILURES; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MAX; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MEAN; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MIN; + +/** + * Summary of duration tracking statistics + * as extracted from an IOStatistics instance. + *

    + * This is for reporting and testing. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class DurationStatisticSummary implements Serializable { + + private static final long serialVersionUID = 6776381340896518486L; + + /** Statistic key. */ + private final String key; + + /** Are these success or failure statistics. */ + private final boolean success; + + /** Count of operation invocations. */ + private final long count; + + /** Max duration; -1 if unknown. */ + private final long max; + + /** Min duration; -1 if unknown. */ + private final long min; + + /** Mean duration -may be null. */ + private final MeanStatistic mean; + + /** + * Constructor. + * @param key Statistic key. + * @param success Are these success or failure statistics. + * @param count Count of operation invocations. + * @param max Max duration; -1 if unknown. + * @param min Min duration; -1 if unknown. + * @param mean Mean duration -may be null. (will be cloned) + */ + public DurationStatisticSummary(final String key, + final boolean success, + final long count, + final long max, + final long min, + @Nullable final MeanStatistic mean) { + this.key = key; + this.success = success; + this.count = count; + this.max = max; + this.min = min; + this.mean = mean == null ? null : mean.clone(); + } + + public String getKey() { + return key; + } + + public boolean isSuccess() { + return success; + } + + public long getCount() { + return count; + } + + public long getMax() { + return max; + } + + public long getMin() { + return min; + } + + public MeanStatistic getMean() { + return mean; + } + + @Override + public String toString() { + return "DurationStatisticSummary{" + + "key='" + key + '\'' + + ", success=" + success + + ", counter=" + count + + ", max=" + max + + ", mean=" + mean + + '}'; + } + + /** + * Fetch the duration timing summary of success or failure operations + * from an IO Statistics source. + * If the duration key is unknown, the summary will be incomplete. + * @param source source of data + * @param key duration statistic key + * @param success fetch success statistics, or if false, failure stats. + * @return a summary of the statistics. + */ + public static DurationStatisticSummary fetchDurationSummary( + IOStatistics source, + String key, + boolean success) { + String fullkey = success ? key : key + SUFFIX_FAILURES; + return new DurationStatisticSummary(key, success, + source.counters().getOrDefault(fullkey, 0L), + source.maximums().getOrDefault(fullkey + SUFFIX_MAX, -1L), + source.minimums().getOrDefault(fullkey + SUFFIX_MIN, -1L), + source.meanStatistics() + .get(fullkey + SUFFIX_MEAN)); + } + + /** + * Fetch the duration timing summary from an IOStatistics source. + * If the duration key is unknown, the summary will be incomplete. + * @param source source of data + * @param key duration statistic key + * @return a summary of the statistics. 
+ */ + public static DurationStatisticSummary fetchSuccessSummary( + IOStatistics source, + String key) { + return fetchDurationSummary(source, key, true); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationTracker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationTracker.java new file mode 100644 index 0000000000000..5a15c7ad66c4f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationTracker.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import java.time.Duration; + +/** + * Interface to be implemented by objects which can track duration. + * It extends AutoCloseable to fit into a try-with-resources statement, + * but then strips out the {@code throws Exception} aspect of the signature + * so it doesn't force code to add extra handling for any failures. + * + * If a duration is declared as "failed()" then the failure counters + * will be updated. + */ +public interface DurationTracker extends AutoCloseable { + + /** + * The operation failed. Failure statistics will be updated. + */ + void failed(); + + /** + * Finish tracking: update the statistics with the timings. + */ + void close(); + + /** + * Get the duration of an operation as a java Duration + * instance. If the duration tracker hasn't completed, + * or its duration tracking doesn't actually measure duration, + * returns Duration.ZERO. + * @return a duration, value of ZERO until close(). + */ + default Duration asDuration() { + return Duration.ZERO; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationTrackerFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationTrackerFactory.java new file mode 100644 index 0000000000000..641d7e8368bb1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/DurationTrackerFactory.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTracker; + +/** + * Interface for a source of duration tracking. + * + * This is intended for uses where it can be passed into classes + * which update operation durations, without tying those + * classes to internal implementation details. + */ +public interface DurationTrackerFactory { + + /** + * Initiate a duration tracking operation by creating/returning + * an object whose {@code close()} call will + * update the statistics. + * + * The statistics counter with the key name will be incremented + * by the given count. + * + * The expected use is within a try-with-resources clause. + * + * The default implementation returns a stub duration tracker. + * @param key statistic key prefix + * @param count #of times to increment the matching counter in this + * operation. + * @return an object to close after an operation completes. + */ + default DurationTracker trackDuration(String key, long count) { + return stubDurationTracker(); + } + + /** + * Initiate a duration tracking operation by creating/returning + * an object whose {@code close()} call will + * update the statistics. + * The expected use is within a try-with-resources clause. + * @param key statistic key + * @return an object to close after an operation completes. + */ + default DurationTracker trackDuration(String key) { + return trackDuration(key, 1); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatistics.java new file mode 100644 index 0000000000000..75d9965128101 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatistics.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * IO Statistics. + *

    + * These are low-cost per-instance statistics provided by any Hadoop + * I/O class instance. + *
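
As a usage sketch only (not part of this patch; the helper class and method names are illustrative, and the map generics are assumed to be Map<String, Long> as elided in the flattened diff above): iterating over the counters exposed by an IOStatistics instance.

import java.util.Map;

import org.apache.hadoop.fs.statistics.IOStatistics;

/** Illustrative helper, not from the patch. */
public class CounterDump {
  /** Print every counter the statistics instance offers. */
  public static void dumpCounters(IOStatistics stats) {
    if (stats != null) {
      for (Map.Entry<String, Long> entry : stats.counters().entrySet()) {
        System.out.println(entry.getKey() + " = " + entry.getValue());
      }
    }
  }
}
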

    + * Consult the filesystem specification document for the requirements + * of an implementation of this interface. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface IOStatistics { + + /** + * Map of counters. + * @return the current map of counters. + */ + Map counters(); + + /** + * Map of gauges. + * @return the current map of gauges. + */ + Map gauges(); + + /** + * Map of minimums. + * @return the current map of minimums. + */ + Map minimums(); + + /** + * Map of maximums. + * @return the current map of maximums. + */ + Map maximums(); + + /** + * Map of meanStatistics. + * @return the current map of MeanStatistic statistics. + */ + Map meanStatistics(); + + /** + * Value when a minimum value has never been set. + */ + long MIN_UNSET_VALUE = -1; + + /** + * Value when a max value has never been set. + */ + long MAX_UNSET_VALUE = -1; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsAggregator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsAggregator.java new file mode 100644 index 0000000000000..1c5451c6f0e83 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsAggregator.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import javax.annotation.Nullable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Interface exported by classes which support + * aggregation of {@link IOStatistics}. + * Implementations MAY aggregate all statistics + * exported by the IOStatistics reference passed in to + * {@link #aggregate(IOStatistics)}, or they + * may selectively aggregate specific values/classes + * of statistics. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface IOStatisticsAggregator { + + /** + * Aggregate the supplied statistics into the current + * set. + * + * @param statistics statistics; may be null + * @return true if the statistics reference was not null and + * so aggregated. 
+ */ + boolean aggregate(@Nullable IOStatistics statistics); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java new file mode 100644 index 0000000000000..c7230e25c3434 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsLogging.java @@ -0,0 +1,301 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import javax.annotation.Nullable; +import java.util.Map; +import java.util.TreeMap; +import java.util.function.Predicate; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; + +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; + +/** + * Utility operations convert IO Statistics sources/instances + * to strings, especially for robustly logging. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class IOStatisticsLogging { + + private static final Logger LOG = + LoggerFactory.getLogger(IOStatisticsLogging.class); + + private IOStatisticsLogging() { + } + + /** + * Extract the statistics from a source object -or "" + * if it is not an instance of {@link IOStatistics}, + * {@link IOStatisticsSource} or the retrieved + * statistics are null. + *
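
A rough sketch of the call pattern this utility is aimed at (illustrative only; the class and logger names, and the choice of FSDataInputStream as the source, are assumptions rather than code from this patch): robustly logging whatever statistics a stream chooses to expose.

import org.apache.hadoop.fs.FSDataInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString;

/** Illustrative helper, not from the patch. */
public class StreamStatsReporter {
  private static final Logger LOG =
      LoggerFactory.getLogger(StreamStatsReporter.class);

  /** Safe for any object: unknown sources simply produce an empty string. */
  public static void reportOnClose(FSDataInputStream in) {
    LOG.info("Stream statistics: {}", ioStatisticsSourceToString(in));
  }
}
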

    + * Exceptions are caught and downgraded to debug logging. + * @param source source of statistics. + * @return a string for logging. + */ + public static String ioStatisticsSourceToString(@Nullable Object source) { + try { + return ioStatisticsToString(retrieveIOStatistics(source)); + } catch (RuntimeException e) { + LOG.debug("Ignoring", e); + return ""; + } + } + + /** + * Convert IOStatistics to a string form. + * @param statistics A statistics instance. + * @return string value or the empty string if null + */ + public static String ioStatisticsToString( + @Nullable final IOStatistics statistics) { + if (statistics != null) { + StringBuilder sb = new StringBuilder(); + mapToString(sb, "counters", statistics.counters(), " "); + mapToString(sb, "gauges", statistics.gauges(), " "); + mapToString(sb, "minimums", statistics.minimums(), " "); + mapToString(sb, "maximums", statistics.maximums(), " "); + mapToString(sb, "means", statistics.meanStatistics(), " "); + + return sb.toString(); + } else { + return ""; + } + } + + /** + * Convert IOStatistics to a string form, with all the metrics sorted + * and empty value stripped. + * This is more expensive than the simple conversion, so should only + * be used for logging/output where it's known/highly likely that the + * caller wants to see the values. Not for debug logging. + * @param statistics A statistics instance. + * @return string value or the empty string if null + */ + public static String ioStatisticsToPrettyString( + @Nullable final IOStatistics statistics) { + if (statistics != null) { + StringBuilder sb = new StringBuilder(); + mapToSortedString(sb, "counters", statistics.counters(), + p -> p == 0); + mapToSortedString(sb, "\ngauges", statistics.gauges(), + p -> p == 0); + mapToSortedString(sb, "\nminimums", statistics.minimums(), + p -> p < 0); + mapToSortedString(sb, "\nmaximums", statistics.maximums(), + p -> p < 0); + mapToSortedString(sb, "\nmeans", statistics.meanStatistics(), + MeanStatistic::isEmpty); + + return sb.toString(); + } else { + return ""; + } + } + + /** + * Given a map, add its entryset to the string. + * The entries are only sorted if the source entryset + * iterator is sorted, such as from a TreeMap. + * @param sb string buffer to append to + * @param type type (for output) + * @param map map to evaluate + * @param separator separator + * @param type of values of the map + */ + private static void mapToString(StringBuilder sb, + final String type, + final Map map, + final String separator) { + int count = 0; + sb.append(type); + sb.append("=("); + for (Map.Entry entry : map.entrySet()) { + if (count > 0) { + sb.append(separator); + } + count++; + sb.append(IOStatisticsBinding.entryToString( + entry.getKey(), entry.getValue())); + } + sb.append(");\n"); + } + + /** + * Given a map, produce a string with all the values, sorted. + * Needs to create a treemap and insert all the entries. + * @param sb string buffer to append to + * @param type type (for output) + * @param map map to evaluate + * @param type of values of the map + */ + private static void mapToSortedString(StringBuilder sb, + final String type, + final Map map, + final Predicate isEmpty) { + mapToString(sb, type, sortedMap(map, isEmpty), "\n"); + } + + /** + * Create a sorted (tree) map from an unsorted map. + * This incurs the cost of creating a map and that + * of inserting every object into the tree. + * @param source source map + * @param value type + * @return a treemap with all the entries. 
+ */ + private static Map sortedMap( + final Map source, + final Predicate isEmpty) { + Map tm = new TreeMap<>(); + for (Map.Entry entry : source.entrySet()) { + if (!isEmpty.test(entry.getValue())) { + tm.put(entry.getKey(), entry.getValue()); + } + } + return tm; + } + + /** + * On demand stringifier of an IOStatisticsSource instance. + *

    + * Whenever this object's toString() method is called, it evaluates the + * statistics. + *

    + * This is designed to be affordable to use in log statements. + * @param source source of statistics -may be null. + * @return an object whose toString() operation returns the current values. + */ + public static Object demandStringifyIOStatisticsSource( + @Nullable IOStatisticsSource source) { + return new SourceToString(source); + } + + /** + * On demand stringifier of an IOStatistics instance. + *
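
A sketch of the deferred-evaluation pattern described here (illustrative; the helper class and logger names are not from the patch): the returned object is only converted to a string if the log statement is actually emitted, which is what keeps it cheap on the normal path.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.fs.statistics.IOStatisticsSource;

import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatisticsSource;

/** Illustrative helper, not from the patch. */
public class DeferredStatsLogger {
  private static final Logger LOG =
      LoggerFactory.getLogger(DeferredStatsLogger.class);

  /** The source is only stringified when debug logging is enabled. */
  public static void debugStats(IOStatisticsSource source) {
    LOG.debug("statistics: {}", demandStringifyIOStatisticsSource(source));
  }
}
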

    + * Whenever this object's toString() method is called, it evaluates the + * statistics. + *

    + * This is for use in log statements where for the cost of creation + * of this entry is low; it is affordable to use in log statements. + * @param statistics statistics to stringify -may be null. + * @return an object whose toString() operation returns the current values. + */ + public static Object demandStringifyIOStatistics( + @Nullable IOStatistics statistics) { + return new StatisticsToString(statistics); + } + + /** + * Extract any statistics from the source and log at debug, if + * the log is set to log at debug. + * No-op if logging is not at debug or the source is null/of + * the wrong type/doesn't provide statistics. + * @param log log to log to + * @param message message for log -this must contain "{}" for the + * statistics report to actually get logged. + * @param source source object + */ + public static void logIOStatisticsAtDebug( + Logger log, + String message, + Object source) { + if (log.isDebugEnabled()) { + // robust extract and convert to string + String stats = ioStatisticsSourceToString(source); + if (!stats.isEmpty()) { + log.debug(message, stats); + } + } + } + + /** + * Extract any statistics from the source and log to + * this class's log at debug, if + * the log is set to log at debug. + * No-op if logging is not at debug or the source is null/of + * the wrong type/doesn't provide statistics. + * @param message message for log -this must contain "{}" for the + * statistics report to actually get logged. + * @param source source object + */ + public static void logIOStatisticsAtDebug( + String message, + Object source) { + logIOStatisticsAtDebug(LOG, message, source); + } + + /** + * On demand stringifier. + *

    + * Whenever this object's toString() method is called, it + * retrieves the latest statistics instance and re-evaluates it. + */ + private static final class SourceToString { + + private final IOStatisticsSource source; + + private SourceToString(@Nullable IOStatisticsSource source) { + this.source = source; + } + + @Override + public String toString() { + return source != null + ? ioStatisticsSourceToString(source) + : IOStatisticsBinding.NULL_SOURCE; + } + } + + /** + * Stringifier of statistics: low cost to instantiate and every + * toString/logging will re-evaluate the statistics. + */ + private static final class StatisticsToString { + + private final IOStatistics statistics; + + /** + * Constructor. + * @param statistics statistics + */ + private StatisticsToString(@Nullable IOStatistics statistics) { + this.statistics = statistics; + } + + /** + * Evaluate and stringify the statistics. + * @return a string value. + */ + @Override + public String toString() { + return statistics != null + ? ioStatisticsToString(statistics) + : IOStatisticsBinding.NULL_SOURCE; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java new file mode 100644 index 0000000000000..5b8b2e284cc11 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java @@ -0,0 +1,285 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import org.apache.hadoop.util.JsonSerialization; + +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.aggregateMaps; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.snapshotMap; + +/** + * Snapshot of statistics from a different source. + *

    + * It is serializable so that frameworks which can use java serialization + * to propagate data (Spark, Flink...) can send the statistics + * back. For this reason, TreeMaps are explicitly used as field types, + * even though IDEs can recommend use of Map instead. + * For security reasons, untrusted java object streams should never be + * deserialized. If for some reason this is required, use + * {@link #requiredSerializationClasses()} to get the list of classes + * used when deserializing instances of this object. + *
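
A minimal aggregation sketch (the collector class and method names are illustrative, not code from this patch): merging the statistics of several streams into one serializable snapshot for reporting.

import java.util.List;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot;

import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics;

/** Illustrative helper, not from the patch. */
public class StatsCollector {
  /** Aggregate whatever statistics each stream offers. */
  public static IOStatisticsSnapshot collect(List<FSDataInputStream> streams) {
    IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot();
    for (FSDataInputStream stream : streams) {
      // aggregate() returns false (and does nothing) for a null source
      snapshot.aggregate(retrieveIOStatistics(stream));
    }
    return snapshot;
  }
}
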

    + *

    + * It is annotated for correct serializations with jackson2. + *

    + */ +@SuppressWarnings("CollectionDeclaredAsConcreteClass") +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class IOStatisticsSnapshot + implements IOStatistics, Serializable, IOStatisticsAggregator { + + private static final long serialVersionUID = -1762522703841538084L; + + /** + * List of chasses needed to deserialize. + */ + private static final Class[] DESERIALIZATION_CLASSES = { + IOStatisticsSnapshot.class, + TreeMap.class, + Long.class, + MeanStatistic.class, + }; + + /** + * Counters. + */ + @JsonProperty + private transient Map counters; + + /** + * Gauges. + */ + @JsonProperty + private transient Map gauges; + + /** + * Minimum values. + */ + @JsonProperty + private transient Map minimums; + + /** + * Maximum values. + */ + @JsonProperty + private transient Map maximums; + + /** + * mean statistics. The JSON key is all lower case.. + */ + @JsonProperty("meanstatistics") + private transient Map meanStatistics; + + /** + * Construct. + */ + public IOStatisticsSnapshot() { + createMaps(); + } + + /** + * Construct, taking a snapshot of the source statistics data + * if the source is non-null. + * If the source is null, the empty maps are created + * @param source statistics source. Nullable. + */ + public IOStatisticsSnapshot(IOStatistics source) { + if (source != null) { + snapshot(source); + } else { + createMaps(); + } + } + + /** + * Create the maps. + */ + private synchronized void createMaps() { + counters = new ConcurrentHashMap<>(); + gauges = new ConcurrentHashMap<>(); + minimums = new ConcurrentHashMap<>(); + maximums = new ConcurrentHashMap<>(); + meanStatistics = new ConcurrentHashMap<>(); + } + + /** + * Clear all the maps. + */ + public synchronized void clear() { + counters.clear(); + gauges.clear(); + minimums.clear(); + maximums.clear(); + meanStatistics.clear(); + } + + /** + * Take a snapshot. + * + * This completely overwrites the map data with the statistics + * from the source. + * @param source statistics source. + */ + public synchronized void snapshot(IOStatistics source) { + checkNotNull(source); + counters = snapshotMap(source.counters()); + gauges = snapshotMap(source.gauges()); + minimums = snapshotMap(source.minimums()); + maximums = snapshotMap(source.maximums()); + meanStatistics = snapshotMap(source.meanStatistics(), + MeanStatistic::copy); + } + + /** + * Aggregate the current statistics with the + * source reference passed in. + * + * The operation is synchronized. + * @param source source; may be null + * @return true if a merge took place. 
+ */ + @Override + public synchronized boolean aggregate( + @Nullable IOStatistics source) { + if (source == null) { + return false; + } + aggregateMaps(counters, source.counters(), + IOStatisticsBinding::aggregateCounters, + IOStatisticsBinding::passthroughFn); + aggregateMaps(gauges, source.gauges(), + IOStatisticsBinding::aggregateGauges, + IOStatisticsBinding::passthroughFn); + aggregateMaps(minimums, source.minimums(), + IOStatisticsBinding::aggregateMinimums, + IOStatisticsBinding::passthroughFn); + aggregateMaps(maximums, source.maximums(), + IOStatisticsBinding::aggregateMaximums, + IOStatisticsBinding::passthroughFn); + aggregateMaps(meanStatistics, source.meanStatistics(), + IOStatisticsBinding::aggregateMeanStatistics, MeanStatistic::copy); + return true; + } + + @Override + public synchronized Map counters() { + return counters; + } + + @Override + public synchronized Map gauges() { + return gauges; + } + + @Override + public synchronized Map minimums() { + return minimums; + } + + @Override + public synchronized Map maximums() { + return maximums; + } + + @Override + public synchronized Map meanStatistics() { + return meanStatistics; + } + + @Override + public String toString() { + return ioStatisticsToString(this); + } + + /** + * Get a JSON serializer for this class. + * @return a serializer. + */ + public static JsonSerialization serializer() { + return new JsonSerialization<>(IOStatisticsSnapshot.class, false, true); + } + + /** + * Serialize by converting each map to a TreeMap, and saving that + * to the stream. + */ + private synchronized void writeObject(ObjectOutputStream s) + throws IOException { + // Write out the core + s.defaultWriteObject(); + s.writeObject(new TreeMap(counters)); + s.writeObject(new TreeMap(gauges)); + s.writeObject(new TreeMap(minimums)); + s.writeObject(new TreeMap(maximums)); + s.writeObject(new TreeMap(meanStatistics)); + } + + /** + * Deserialize by loading each TreeMap, and building concurrent + * hash maps from them. + */ + private void readObject(final ObjectInputStream s) + throws IOException, ClassNotFoundException { + // read in core + s.defaultReadObject(); + // and rebuild a concurrent hashmap from every serialized tree map + // read back from the stream. + counters = new ConcurrentHashMap<>( + (TreeMap) s.readObject()); + gauges = new ConcurrentHashMap<>( + (TreeMap) s.readObject()); + minimums = new ConcurrentHashMap<>( + (TreeMap) s.readObject()); + maximums = new ConcurrentHashMap<>( + (TreeMap) s.readObject()); + meanStatistics = new ConcurrentHashMap<>( + (TreeMap) s.readObject()); + } + + /** + * What classes are needed to deserialize this class? + * Needed to securely unmarshall this from untrusted sources. + * @return a list of required classes to deserialize the data. + */ + public static List requiredSerializationClasses() { + return Arrays.stream(DESERIALIZATION_CLASSES) + .collect(Collectors.toList()); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSource.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSource.java new file mode 100644 index 0000000000000..67bf51fc0c3ae --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSource.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A source of IO statistics. + *

    + * These statistics MUST be instance specific, not thread local. + *
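
A sketch of what an implementing class might look like (the class name is hypothetical; the point being illustrated is the instance-specific statistics requirement above):

import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot;
import org.apache.hadoop.fs.statistics.IOStatisticsSource;

/** Hypothetical example class, not part of the patch. */
public class InstrumentedChannel implements IOStatisticsSource {

  /** One statistics instance per channel: instance specific, not thread local. */
  private final IOStatisticsSnapshot statistics = new IOStatisticsSnapshot();

  @Override
  public IOStatistics getIOStatistics() {
    return statistics;
  }
}
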

    + */ + +@InterfaceStability.Unstable +public interface IOStatisticsSource { + + /** + * Return a statistics instance. + *

    + * It is not a requirement that the same instance is returned every time by an + * {@link IOStatisticsSource}. + *

    + * If the object implementing this is Closeable, this method + * may return null if invoked on a closed object, even if + * it returns a valid instance when called earlier. + * @return an IOStatistics instance or null + */ + default IOStatistics getIOStatistics() { + return null; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSupport.java new file mode 100644 index 0000000000000..75977047c0f2a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSupport.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.impl.StubDurationTracker; +import org.apache.hadoop.fs.statistics.impl.StubDurationTrackerFactory; + +/** + * Support for working with IOStatistics. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class IOStatisticsSupport { + + private IOStatisticsSupport() { + } + + /** + * Take a snapshot of the current statistics state. + *

    + * This is not an atomic operation. + *

    + * The instance can be serialized, and its + * {@code toString()} method lists all the values. + * @param statistics statistics + * @return a snapshot of the current values. + */ + public static IOStatisticsSnapshot + snapshotIOStatistics(IOStatistics statistics) { + + return new IOStatisticsSnapshot(statistics); + } + + /** + * Create a snapshot statistics instance ready to aggregate data. + * + * The instance can be serialized, and its + * {@code toString()} method lists all the values. + * @return an empty snapshot + */ + public static IOStatisticsSnapshot + snapshotIOStatistics() { + + return new IOStatisticsSnapshot(); + } + + /** + * Get the IOStatistics of the source, casting it + * if it is of the relevant type, otherwise, + * if it implements {@link IOStatisticsSource} + * extracting the value. + * + * Returns null if the source isn't of the write type + * or the return value of + * {@link IOStatisticsSource#getIOStatistics()} was null. + * @return an IOStatistics instance or null + */ + + public static IOStatistics retrieveIOStatistics( + final Object source) { + if (source instanceof IOStatistics) { + return (IOStatistics) source; + } else if (source instanceof IOStatisticsSource) { + return ((IOStatisticsSource) source).getIOStatistics(); + } else { + // null source or interface not implemented + return null; + } + } + + /** + * Return a stub duration tracker factory whose returned trackers + * are always no-ops. + * + * As singletons are returned, this is very low-cost to use. + * @return a duration tracker factory. + */ + public static DurationTrackerFactory stubDurationTrackerFactory() { + return StubDurationTrackerFactory.STUB_DURATION_TRACKER_FACTORY; + } + + /** + * Get a stub duration tracker. + * @return a stub tracker. + */ + public static DurationTracker stubDurationTracker() { + return StubDurationTracker.STUB_DURATION_TRACKER; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/MeanStatistic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/MeanStatistic.java new file mode 100644 index 0000000000000..d9ff0c25c6a21 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/MeanStatistic.java @@ -0,0 +1,290 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import java.io.Serializable; +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonIgnore; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A mean statistic represented as the sum and the sample count; + * the mean is calculated on demand. + *

    + * It can be used to accrue values so as to dynamically update + * the mean. If so, know that there is no synchronization + * on the methods. + *

    + *

    + * If a statistic has 0 samples then it is considered to be empty. + *

    + *

    + * All 'empty' statistics are equivalent, independent of the sum value. + *

    + *

    + * For non-empty statistics, sum and sample values must match + * for equality. + *

    + *

    + * It is serializable and annotated for correct serializations with jackson2. + *

    + *

    + * Thread safety. The operations to add/copy sample data, are thread safe. + *

    + *
      + *
+ * <ol>
+ *   <li>{@link #add(MeanStatistic)}</li>
+ *   <li>{@link #addSample(long)}</li>
+ *   <li>{@link #clear()}</li>
+ *   <li>{@link #setSamplesAndSum(long, long)}</li>
+ *   <li>{@link #set(MeanStatistic)}</li>
+ *   <li>{@link #setSamples(long)} and {@link #setSum(long)}</li>
+ * </ol>
    + *

+ * So is the {@link #mean()} method. This ensures that when + * used to aggregate statistics, the aggregate value and sample + * count are set and evaluated consistently.

    + *
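A minimal usage sketch, using only the methods declared in this class (the sample values are illustrative):

    MeanStatistic stat = new MeanStatistic();
    stat.addSample(100);
    stat.addSample(200);
    stat.addSample(300);
    double mean = stat.mean();        // 200.0
    MeanStatistic copy = stat.copy(); // independent copy of samples and sum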

+ * Other methods are marked as synchronized because Findbugs overreacts + * to the fact that some operations updating the sum and sample count + * are synchronized while methods such as {@code equals()} are not.

    + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class MeanStatistic implements Serializable, Cloneable { + + private static final long serialVersionUID = 567888327998615425L; + + /** + * Number of samples used to calculate + * the mean. + */ + private long samples; + + /** + * sum of the values. + */ + private long sum; + + /** + * Constructor, with some resilience against invalid sample counts. + * If the sample count is 0 or less, the sum is set to 0 and + * the sample count to 0. + * @param samples sample count. + * @param sum sum value + */ + public MeanStatistic(final long samples, final long sum) { + if (samples > 0) { + this.sum = sum; + this.samples = samples; + } + } + + /** + * Create from another statistic. + * @param that source + */ + public MeanStatistic(MeanStatistic that) { + synchronized (that) { + set(that); + } + } + + /** + * Create an empty statistic. + */ + public MeanStatistic() { + } + + /** + * Get the sum of samples. + * @return the sum + */ + public synchronized long getSum() { + return sum; + } + + /** + * Get the sample count. + * @return the sample count; 0 means empty + */ + public synchronized long getSamples() { + return samples; + } + + /** + * Is a statistic empty? + * @return true if the sample count is 0 + */ + @JsonIgnore + public synchronized boolean isEmpty() { + return samples == 0; + } + + /** + * Set the values to 0. + */ + public void clear() { + setSamplesAndSum(0, 0); + } + + /** + * Set the sum and samples. + * Synchronized. + * @param sampleCount new sample count. + * @param newSum new sum + */ + public synchronized void setSamplesAndSum(long sampleCount, + long newSum) { + setSamples(sampleCount); + setSum(newSum); + } + + /** + * Set the statistic to the values of another. + * Synchronized. + * @param other the source. + */ + public void set(final MeanStatistic other) { + setSamplesAndSum(other.getSamples(), other.getSum()); + } + + /** + * Set the sum. + * @param sum new sum + */ + public synchronized void setSum(final long sum) { + this.sum = sum; + } + + /** + * Set the sample count. + * + * If this is less than zero, it is set to zero. + * This stops an ill-formed JSON entry from + * breaking deserialization, or get an invalid sample count + * into an entry. + * @param samples sample count. + */ + public synchronized void setSamples(final long samples) { + if (samples < 0) { + this.samples = 0; + } else { + this.samples = samples; + } + } + + /** + * Get the arithmetic mean value. + * @return the mean + */ + public synchronized double mean() { + return samples > 0 + ? ((double) sum) / samples + : 0.0d; + } + + /** + * Add another MeanStatistic. + * @param other other value + */ + public synchronized MeanStatistic add(final MeanStatistic other) { + if (other.isEmpty()) { + return this; + } + long otherSamples; + long otherSum; + synchronized (other) { + otherSamples = other.samples; + otherSum = other.sum; + } + if (isEmpty()) { + samples = otherSamples; + sum = otherSum; + return this; + } + samples += otherSamples; + sum += otherSum; + return this; + } + + /** + * Add a sample. + * Thread safe. + * @param value value to add to the sum + */ + public synchronized void addSample(long value) { + samples++; + sum += value; + } + + /** + * The hash code is derived from the mean + * and sample count: if either is changed + * the statistic cannot be used as a key + * for hash tables/maps. 
+ * @return a hash value + */ + @Override + public synchronized int hashCode() { + return Objects.hash(sum, samples); + } + + @Override + public synchronized boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MeanStatistic that = (MeanStatistic) o; + if (isEmpty()) { + // if we are empty, then so must the other. + return that.isEmpty(); + } + return getSum() == that.getSum() && + getSamples() == that.getSamples(); + } + + @Override + public MeanStatistic clone() { + return copy(); + } + + /** + * Create a copy of this instance. + * @return copy. + * + */ + public MeanStatistic copy() { + return new MeanStatistic(this); + } + + @Override + public String toString() { + return String.format("(samples=%d, sum=%d, mean=%.4f)", + samples, sum, mean()); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java new file mode 100644 index 0000000000000..ef04feca6917f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StoreStatisticNames.java @@ -0,0 +1,355 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Common statistic names for object store operations.. + *

    + * When adding new common statistic name constants, please make them unique. + * By convention: + *

+ * <ul>
+ *   <li>the names of the constants are uppercase, words separated by
+ *   underscores.</li>
+ *   <li>the values of the constants are the lowercase form of the
+ *   constant names.</li>
+ * </ul>
    + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class StoreStatisticNames { + + /** {@value}. */ + public static final String OP_ABORT = "op_abort"; + + /** {@value}. */ + public static final String OP_APPEND = "op_append"; + + /** {@value}. */ + public static final String OP_COPY_FROM_LOCAL_FILE = + "op_copy_from_local_file"; + + /** {@value}. */ + public static final String OP_CREATE = "op_create"; + + /** {@value}. */ + public static final String OP_CREATE_NON_RECURSIVE = + "op_create_non_recursive"; + + /** {@value}. */ + public static final String OP_DELETE = "op_delete"; + + /** {@value}. */ + public static final String OP_EXISTS = "op_exists"; + + /** {@value}. */ + public static final String OP_GET_CONTENT_SUMMARY = + "op_get_content_summary"; + + /** {@value}. */ + public static final String OP_GET_DELEGATION_TOKEN = + "op_get_delegation_token"; + + /** {@value}. */ + public static final String OP_GET_FILE_CHECKSUM = + "op_get_file_checksum"; + + /** {@value}. */ + public static final String OP_GET_FILE_STATUS = "op_get_file_status"; + + /** {@value}. */ + public static final String OP_GET_STATUS = "op_get_status"; + + /** {@value}. */ + public static final String OP_GLOB_STATUS = "op_glob_status"; + + /** {@value}. */ + public static final String OP_IS_FILE = "op_is_file"; + + /** {@value}. */ + public static final String OP_IS_DIRECTORY = "op_is_directory"; + + /** {@value}. */ + public static final String OP_LIST_FILES = "op_list_files"; + + /** {@value}. */ + public static final String OP_LIST_LOCATED_STATUS = + "op_list_located_status"; + + /** {@value}. */ + public static final String OP_LIST_STATUS = "op_list_status"; + + /** {@value}. */ + public static final String OP_MKDIRS = "op_mkdirs"; + + /** {@value}. */ + public static final String OP_MODIFY_ACL_ENTRIES = "op_modify_acl_entries"; + + /** {@value}. */ + public static final String OP_OPEN = "op_open"; + + /** {@value}. */ + public static final String OP_REMOVE_ACL = "op_remove_acl"; + + /** {@value}. */ + public static final String OP_REMOVE_ACL_ENTRIES = "op_remove_acl_entries"; + + /** {@value}. */ + public static final String OP_REMOVE_DEFAULT_ACL = "op_remove_default_acl"; + + /** {@value}. */ + public static final String OP_RENAME = "op_rename"; + + /** {@value}. */ + public static final String OP_SET_ACL = "op_set_acl"; + + /** {@value}. */ + public static final String OP_SET_OWNER = "op_set_owner"; + + /** {@value}. */ + public static final String OP_SET_PERMISSION = "op_set_permission"; + + /** {@value}. */ + public static final String OP_SET_TIMES = "op_set_times"; + + /** {@value}. */ + public static final String OP_TRUNCATE = "op_truncate"; + + /* The XAttr API */ + + /** Invoke {@code getXAttrs(Path path)}: {@value}. */ + public static final String OP_XATTR_GET_MAP = "op_xattr_get_map"; + + /** Invoke {@code getXAttr(Path, String)}: {@value}. */ + public static final String OP_XATTR_GET_NAMED = "op_xattr_get_named"; + + /** + * Invoke {@code getXAttrs(Path path, List names)}: {@value}. + */ + public static final String OP_XATTR_GET_NAMED_MAP = + "op_xattr_get_named_map"; + + /** Invoke {@code listXAttrs(Path path)}: {@value}. */ + public static final String OP_XATTR_LIST = "op_xattr_list"; + + + /** {@value}. */ + public static final String DELEGATION_TOKENS_ISSUED + = "delegation_tokens_issued"; + + /** Requests throttled and retried: {@value}. */ + public static final String STORE_IO_THROTTLED + = "store_io_throttled"; + + /** Requests made of a store: {@value}. 
*/ + public static final String STORE_IO_REQUEST + = "store_io_request"; + + /** + * IO retried: {@value}. + */ + public static final String STORE_IO_RETRY + = "store_io_retry"; + + /** + * A store's equivalent of a paged LIST request was initiated: {@value}. + */ + public static final String OBJECT_LIST_REQUEST + = "object_list_request"; + + /** + * Number of continued object listings made. + * Value :{@value}. + */ + public static final String OBJECT_CONTINUE_LIST_REQUEST = + "object_continue_list_request"; + + /** + * A bulk DELETE request was made: {@value}. + * A separate statistic from {@link #OBJECT_DELETE_REQUEST} + * so that metrics on duration of the operations can + * be distinguished. + */ + public static final String OBJECT_BULK_DELETE_REQUEST + = "object_bulk_delete_request"; + + /** + * A store's equivalent of a DELETE request was made: {@value}. + * This may be an HTTP DELETE verb, or it may be some custom + * operation which takes a list of objects to delete. + */ + public static final String OBJECT_DELETE_REQUEST + = "object_delete_request"; + + /** + * The count of objects deleted in delete requests. + */ + public static final String OBJECT_DELETE_OBJECTS + = "object_delete_objects"; + + /** + * Object multipart upload initiated. + * Value :{@value}. + */ + public static final String OBJECT_MULTIPART_UPLOAD_INITIATED = + "object_multipart_initiated"; + + /** + * Object multipart upload aborted. + * Value :{@value}. + */ + public static final String OBJECT_MULTIPART_UPLOAD_ABORTED = + "object_multipart_aborted"; + + /** + * Object put/multipart upload count. + * Value :{@value}. + */ + public static final String OBJECT_PUT_REQUEST = + "object_put_request"; + + /** + * Object put/multipart upload completed count. + * Value :{@value}. + */ + public static final String OBJECT_PUT_REQUEST_COMPLETED = + "object_put_request_completed"; + + /** + * Current number of active put requests. + * Value :{@value}. + */ + public static final String OBJECT_PUT_REQUEST_ACTIVE = + "object_put_request_active"; + + /** + * number of bytes uploaded. + * Value :{@value}. + */ + public static final String OBJECT_PUT_BYTES = + "object_put_bytes"; + + /** + * number of bytes queued for upload/being actively uploaded. + * Value :{@value}. + */ + public static final String OBJECT_PUT_BYTES_PENDING = + "object_put_bytes_pending"; + + /** + * Count of S3 Select (or similar) requests issued. + * Value :{@value}. + */ + public static final String OBJECT_SELECT_REQUESTS = + "object_select_requests"; + + /** + * Suffix to use for a minimum value when + * the same key is shared across min/mean/max + * statistics. + * + * Value {@value}. + */ + public static final String SUFFIX_MIN = ".min"; + + /** + * Suffix to use for a maximum value when + * the same key is shared across max/mean/max + * statistics. + * + * Value {@value}. + */ + public static final String SUFFIX_MAX = ".max"; + + /** + * Suffix to use for a mean statistic value when + * the same key is shared across mean/mean/max + * statistics. + * + * Value {@value}. + */ + public static final String SUFFIX_MEAN = ".mean"; + + /** + * String to add to counters and other stats to track failures. + * This comes before the .min/.mean//max suffixes. + * + * Value {@value}. + */ + public static final String SUFFIX_FAILURES = ".failures"; + + /** + * The name of the statistic collected for executor acquisition if + * a duration tracker factory is passed in to the constructor. + * {@value}. 
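To illustrate how the suffix constants above compose with a base statistic key, a short sketch (the composed keys are hypothetical examples, not new constants):

    String base = StoreStatisticNames.OP_RENAME;              // "op_rename"
    String maxKey = base + StoreStatisticNames.SUFFIX_MAX;    // "op_rename.max"
    String failureMean = base
        + StoreStatisticNames.SUFFIX_FAILURES
        + StoreStatisticNames.SUFFIX_MEAN;                    // "op_rename.failures.mean"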
+ */ + public static final String ACTION_EXECUTOR_ACQUIRED = + "action_executor_acquired"; + + /** + * An HTTP HEAD request was made: {@value}. + */ + public static final String ACTION_HTTP_HEAD_REQUEST + = "action_http_head_request"; + + /** + * An HTTP GET request was made: {@value}. + */ + public static final String ACTION_HTTP_GET_REQUEST + = "action_http_get_request"; + + /** + * An HTTP HEAD request was made: {@value}. + */ + public static final String OBJECT_METADATA_REQUESTS + = "object_metadata_request"; + + public static final String OBJECT_COPY_REQUESTS + = "object_copy_requests"; + + public static final String STORE_IO_THROTTLE_RATE + = "store_io_throttle_rate"; + + public static final String MULTIPART_UPLOAD_INSTANTIATED + = "multipart_instantiated"; + + public static final String MULTIPART_UPLOAD_PART_PUT + = "multipart_upload_part_put"; + + public static final String MULTIPART_UPLOAD_PART_PUT_BYTES + = "multipart_upload_part_put_bytes"; + + public static final String MULTIPART_UPLOAD_ABORTED + = "multipart_upload_aborted"; + + public static final String MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED + = "multipart_upload_abort_under_path_invoked"; + + public static final String MULTIPART_UPLOAD_COMPLETED + = "multipart_upload_completed"; + + public static final String MULTIPART_UPLOAD_STARTED + = "multipart_upload_started"; + + private StoreStatisticNames() { + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StreamStatisticNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StreamStatisticNames.java new file mode 100644 index 0000000000000..bbb8517118e16 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/StreamStatisticNames.java @@ -0,0 +1,364 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * These are common statistic names. + *

    + * When adding new common statistic name constants, please make them unique. + * By convention, they are implicitly unique: + *

+ * <ul>
+ *   <li>
+ *     The names of the constants are uppercase, words separated by
+ *     underscores.
+ *   </li>
+ *   <li>
+ *     The values of the constants are the lowercase form of the
+ *     constant names.
+ *   </li>
+ * </ul>
    + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class StreamStatisticNames { + + /** + * Count of times the TCP stream was aborted. + * Value: {@value}. + */ + public static final String STREAM_READ_ABORTED = "stream_aborted"; + + /** + * Bytes read from an input stream in read() calls. + * Does not include bytes read and then discarded in seek/close etc. + * These are the bytes returned to the caller. + * Value: {@value}. + */ + public static final String STREAM_READ_BYTES + = "stream_read_bytes"; + + /** + * Count of bytes discarded by aborting an input stream . + * Value: {@value}. + */ + public static final String STREAM_READ_BYTES_DISCARDED_ABORT + = "stream_read_bytes_discarded_in_abort"; + + /** + * Count of bytes read and discarded when closing an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_BYTES_DISCARDED_CLOSE + = "stream_read_bytes_discarded_in_close"; + + /** + * Count of times the TCP stream was closed. + * Value: {@value}. + */ + public static final String STREAM_READ_CLOSED = "stream_read_closed"; + + /** + * Total count of times an attempt to close an input stream was made + * Value: {@value}. + */ + public static final String STREAM_READ_CLOSE_OPERATIONS + = "stream_read_close_operations"; + + /** + * Total count of times an input stream to was opened. + * For object stores, that means the count a GET request was initiated. + * Value: {@value}. + */ + public static final String STREAM_READ_OPENED = "stream_read_opened"; + + /** + * Count of exceptions raised during input stream reads. + * Value: {@value}. + */ + public static final String STREAM_READ_EXCEPTIONS = + "stream_read_exceptions"; + + /** + * Count of readFully() operations in an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_FULLY_OPERATIONS + = "stream_read_fully_operations"; + + /** + * Count of read() operations in an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_OPERATIONS = + "stream_read_operations"; + + /** + * Count of incomplete read() operations in an input stream, + * that is, when the bytes returned were less than that requested. + * Value: {@value}. + */ + public static final String STREAM_READ_OPERATIONS_INCOMPLETE + = "stream_read_operations_incomplete"; + + /** + * Count of version mismatches encountered while reading an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_VERSION_MISMATCHES + = "stream_read_version_mismatches"; + + /** + * Count of executed seek operations which went backwards in a stream. + * Value: {@value}. + */ + public static final String STREAM_READ_SEEK_BACKWARD_OPERATIONS = + "stream_read_seek_backward_operations"; + + /** + * Count of bytes moved backwards during seek operations + * in an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_SEEK_BYTES_BACKWARDS + = "stream_read_bytes_backwards_on_seek"; + + /** + * Count of bytes read and discarded during seek() in an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_SEEK_BYTES_DISCARDED = + "stream_read_seek_bytes_discarded"; + + /** + * Count of bytes skipped during forward seek operations. + * Value: {@value}. + */ + public static final String STREAM_READ_SEEK_BYTES_SKIPPED + = "stream_read_seek_bytes_skipped"; + + /** + * Count of executed seek operations which went forward in + * an input stream. + * Value: {@value}. 
+ */ + public static final String STREAM_READ_SEEK_FORWARD_OPERATIONS + = "stream_read_seek_forward_operations"; + + /** + * Count of times the seek policy was dynamically changed + * in an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_SEEK_POLICY_CHANGED = + "stream_read_seek_policy_changed"; + + /** + * Count of seek operations in an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_SEEK_OPERATIONS = + "stream_read_seek_operations"; + + /** + * Count of {@code InputStream.skip()} calls. + * Value: {@value}. + */ + public static final String STREAM_READ_SKIP_OPERATIONS = + "stream_read_skip_operations"; + + /** + * Count bytes skipped in {@code InputStream.skip()} calls. + * Value: {@value}. + */ + public static final String STREAM_READ_SKIP_BYTES = + "stream_read_skip_bytes"; + + /** + * Total count of bytes read from an input stream. + * Value: {@value}. + */ + public static final String STREAM_READ_TOTAL_BYTES + = "stream_read_total_bytes"; + + /** + * Count of calls of {@code CanUnbuffer.unbuffer()}. + * Value: {@value}. + */ + public static final String STREAM_READ_UNBUFFERED + = "stream_read_unbuffered"; + + /** + * "Count of stream write failures reported. + * Value: {@value}. + */ + public static final String STREAM_WRITE_EXCEPTIONS = + "stream_write_exceptions"; + + /** + * Count of failures when finalizing a multipart upload: + * {@value}. + */ + public static final String STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS = + "stream_write_exceptions_completing_upload"; + + /** + * Count of block/partition uploads complete. + * Value: {@value}. + */ + public static final String STREAM_WRITE_BLOCK_UPLOADS + = "stream_write_block_uploads"; + + /** + * Count of number of block uploads aborted. + * Value: {@value}. + */ + public static final String STREAM_WRITE_BLOCK_UPLOADS_ABORTED + = "stream_write_block_uploads_aborted"; + + /** + * Count of block/partition uploads active. + * Value: {@value}. + */ + public static final String STREAM_WRITE_BLOCK_UPLOADS_ACTIVE + = "stream_write_block_uploads_active"; + + /** + * Gauge of data queued to be written. + * Value: {@value}. + */ + public static final String STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING = + "stream_write_block_uploads_data_pending"; + + /** + * Count of number of block uploads committed. + * Value: {@value}. + */ + public static final String STREAM_WRITE_BLOCK_UPLOADS_COMMITTED + = "stream_write_block_uploads_committed"; + + /** + * Gauge of block/partitions uploads queued to be written. + * Value: {@value}. + */ + public static final String STREAM_WRITE_BLOCK_UPLOADS_PENDING + = "stream_write_block_uploads_pending"; + + + /** + * "Count of bytes written to output stream including all not yet uploaded. + * {@value}. + */ + public static final String STREAM_WRITE_BYTES + = "stream_write_bytes"; + + /** + * Count of total time taken for uploads to complete. + * {@value}. + */ + public static final String STREAM_WRITE_TOTAL_TIME + = "stream_write_total_time"; + + /** + * Total queue duration of all block uploads. + * {@value}. + */ + public static final String STREAM_WRITE_QUEUE_DURATION + = "stream_write_queue_duration"; + + public static final String STREAM_WRITE_TOTAL_DATA + = "stream_write_total_data"; + + /** + * Number of bytes to upload from an OutputStream. + */ + public static final String BYTES_TO_UPLOAD + = "bytes_upload"; + + /** + * Number of bytes uploaded successfully to the object store. 
+ */ + public static final String BYTES_UPLOAD_SUCCESSFUL + = "bytes_upload_successfully"; + + /** + * Number of bytes failed to upload to the object store. + */ + public static final String BYTES_UPLOAD_FAILED + = "bytes_upload_failed"; + + /** + * Total time spent on waiting for a task to complete. + */ + public static final String TIME_SPENT_ON_TASK_WAIT + = "time_spent_task_wait"; + + /** + * Number of task queue shrunk operations. + */ + public static final String QUEUE_SHRUNK_OPS + = "queue_shrunk_ops"; + + /** + * Number of times current buffer is written to the service. + */ + public static final String WRITE_CURRENT_BUFFER_OPERATIONS + = "write_current_buffer_ops"; + + /** + * Total time spent on completing a PUT request. + */ + public static final String TIME_SPENT_ON_PUT_REQUEST + = "time_spent_on_put_request"; + + /** + * Number of seeks in buffer. + */ + public static final String SEEK_IN_BUFFER + = "seek_in_buffer"; + + /** + * Number of bytes read from the buffer. + */ + public static final String BYTES_READ_BUFFER + = "bytes_read_buffer"; + + /** + * Total number of remote read operations performed. + */ + public static final String REMOTE_READ_OP + = "remote_read_op"; + + /** + * Total number of bytes read from readAhead. + */ + public static final String READ_AHEAD_BYTES_READ + = "read_ahead_bytes_read"; + + /** + * Total number of bytes read from remote operations. + */ + public static final String REMOTE_BYTES_READ + = "remote_bytes_read"; + + private StreamStatisticNames() { + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/AbstractIOStatisticsImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/AbstractIOStatisticsImpl.java new file mode 100644 index 0000000000000..c701a509d8951 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/AbstractIOStatisticsImpl.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import org.apache.hadoop.fs.statistics.IOStatistics; + +/** + * Base implementation in case common methods/fields need to be added + * in future. 
+ */ +public abstract class AbstractIOStatisticsImpl implements IOStatistics { + + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatistics.java new file mode 100644 index 0000000000000..50c2625c3513d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatistics.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.util.Collections; +import java.util.Map; +import java.util.function.Function; + +import org.apache.hadoop.fs.statistics.MeanStatistic; + +/** + * These statistics are dynamically evaluated by the supplied + * String -> type functions. + * + * This allows statistic sources to supply a list of callbacks used to + * generate the statistics on demand; similar to some of the Coda Hale metrics. + * + * The evaluation actually takes place during the iteration's {@code next()} + * call. + */ +final class DynamicIOStatistics + extends AbstractIOStatisticsImpl { + + /** + * Counter evaluators. + */ + private final EvaluatingStatisticsMap counters + = new EvaluatingStatisticsMap<>(); + + private final EvaluatingStatisticsMap gauges + = new EvaluatingStatisticsMap<>(); + + private final EvaluatingStatisticsMap minimums + = new EvaluatingStatisticsMap<>(); + + private final EvaluatingStatisticsMap maximums + = new EvaluatingStatisticsMap<>(); + + private final EvaluatingStatisticsMap meanStatistics + = new EvaluatingStatisticsMap<>(MeanStatistic::copy); + + DynamicIOStatistics() { + } + + @Override + public Map counters() { + return Collections.unmodifiableMap(counters); + } + + @Override + public Map gauges() { + return Collections.unmodifiableMap(gauges); + } + + @Override + public Map minimums() { + return Collections.unmodifiableMap(minimums); + } + + @Override + public Map maximums() { + return Collections.unmodifiableMap(maximums); + } + + @Override + public Map meanStatistics() { + return Collections.unmodifiableMap(meanStatistics); + } + + /** + * add a mapping of a key to a counter function. + * @param key the key + * @param eval the evaluator + */ + void addCounterFunction(String key, Function eval) { + counters.addFunction(key, eval); + } + + /** + * add a mapping of a key to a gauge function. + * @param key the key + * @param eval the evaluator + */ + void addGaugeFunction(String key, Function eval) { + gauges.addFunction(key, eval); + } + + /** + * add a mapping of a key to a minimum function. 
+ * @param key the key + * @param eval the evaluator + */ + void addMinimumFunction(String key, Function eval) { + minimums.addFunction(key, eval); + } + + /** + * add a mapping of a key to a maximum function. + * @param key the key + * @param eval the evaluator + */ + void addMaximumFunction(String key, Function eval) { + maximums.addFunction(key, eval); + } + + /** + * add a mapping of a key to a meanStatistic function. + * @param key the key + * @param eval the evaluator + */ + void addMeanStatisticFunction(String key, + Function eval) { + meanStatistics.addFunction(key, eval); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java new file mode 100644 index 0000000000000..47a317076dcf2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; +import java.util.function.ToLongFunction; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.MeanStatistic; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; + +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState; + +/** + * Builder of {@link DynamicIOStatistics}. + * + * Instantiate through + * {@link IOStatisticsBinding#dynamicIOStatistics()}. + */ +public class DynamicIOStatisticsBuilder { + + /** + * the instance being built up. Will be null after the (single) + * call to {@link #build()}. + */ + private DynamicIOStatistics instance = new DynamicIOStatistics(); + + /** + * Build the IOStatistics instance. + * @return an instance. + * @throws IllegalStateException if the builder has already been built. + */ + public IOStatistics build() { + final DynamicIOStatistics stats = activeInstance(); + // stop the builder from working any more. + instance = null; + return stats; + } + + /** + * Get the statistics instance. + * @return the instance to build/return + * @throws IllegalStateException if the builder has already been built. + */ + private DynamicIOStatistics activeInstance() { + checkState(instance != null, "Already built"); + return instance; + } + + /** + * Add a new evaluator to the counter statistics. + * @param key key of this statistic + * @param eval evaluator for the statistic + * @return the builder. 
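A sketch of how the builder is typically used; the statistic keys, the AtomicLong and the currentQueueDepth() accessor are hypothetical:

    AtomicLong bytesRead = new AtomicLong();
    IOStatistics stats = IOStatisticsBinding.dynamicIOStatistics()
        .withAtomicLongCounter("bytes_read", bytesRead)
        .withLongFunctionGauge("queue_depth", key -> currentQueueDepth())
        .build();
    // values are evaluated on demand, so later updates to bytesRead
    // remain visible through stats.counters()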
+ */ + public DynamicIOStatisticsBuilder withLongFunctionCounter(String key, + ToLongFunction eval) { + activeInstance().addCounterFunction(key, eval::applyAsLong); + return this; + } + + /** + * Add a counter statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic long counter + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicLongCounter(String key, + AtomicLong source) { + withLongFunctionCounter(key, s -> source.get()); + return this; + } + + /** + * Add a counter statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic int counter + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicIntegerCounter(String key, + AtomicInteger source) { + withLongFunctionCounter(key, s -> source.get()); + return this; + } + + /** + * Build a dynamic counter statistic from a + * {@link MutableCounterLong}. + * @param key key of this statistic + * @param source mutable long counter + * @return the builder. + */ + public DynamicIOStatisticsBuilder withMutableCounter(String key, + MutableCounterLong source) { + withLongFunctionCounter(key, s -> source.value()); + return this; + } + + /** + * Add a new evaluator to the gauge statistics. + * @param key key of this statistic + * @param eval evaluator for the statistic + * @return the builder. + */ + public DynamicIOStatisticsBuilder withLongFunctionGauge(String key, + ToLongFunction eval) { + activeInstance().addGaugeFunction(key, eval::applyAsLong); + return this; + } + + /** + * Add a gauge statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic long gauge + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicLongGauge(String key, + AtomicLong source) { + withLongFunctionGauge(key, s -> source.get()); + return this; + } + + /** + * Add a gauge statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic int gauge + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicIntegerGauge(String key, + AtomicInteger source) { + withLongFunctionGauge(key, s -> source.get()); + return this; + } + + /** + * Add a new evaluator to the minimum statistics. + * @param key key of this statistic + * @param eval evaluator for the statistic + * @return the builder. + */ + public DynamicIOStatisticsBuilder withLongFunctionMinimum(String key, + ToLongFunction eval) { + activeInstance().addMinimumFunction(key, eval::applyAsLong); + return this; + } + + /** + * Add a minimum statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic long minimum + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicLongMinimum(String key, + AtomicLong source) { + withLongFunctionMinimum(key, s -> source.get()); + return this; + } + + /** + * Add a minimum statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic int minimum + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicIntegerMinimum(String key, + AtomicInteger source) { + withLongFunctionMinimum(key, s -> source.get()); + return this; + } + + + /** + * Add a new evaluator to the maximum statistics. 
+ * @param key key of this statistic + * @param eval evaluator for the statistic + * @return the builder. + */ + public DynamicIOStatisticsBuilder withLongFunctionMaximum(String key, + ToLongFunction eval) { + activeInstance().addMaximumFunction(key, eval::applyAsLong); + return this; + } + + /** + * Add a maximum statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic long maximum + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicLongMaximum(String key, + AtomicLong source) { + withLongFunctionMaximum(key, s -> source.get()); + return this; + } + + /** + * Add a maximum statistic to dynamically return the + * latest value of the source. + * @param key key of this statistic + * @param source atomic int maximum + * @return the builder. + */ + public DynamicIOStatisticsBuilder withAtomicIntegerMaximum(String key, + AtomicInteger source) { + withLongFunctionMaximum(key, s -> source.get()); + return this; + } + + /** + * Add a new evaluator to the mean statistics. + * + * This is a function which must return the mean and the sample count. + * @param key key of this statistic + * @param eval evaluator for the statistic + * @return the builder. + */ + public DynamicIOStatisticsBuilder withMeanStatisticFunction(String key, + Function eval) { + activeInstance().addMeanStatisticFunction(key, eval); + return this; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatistics.java new file mode 100644 index 0000000000000..f474fc209771c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EmptyIOStatistics.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.util.Map; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.MeanStatistic; + +import static java.util.Collections.emptyMap; + +/** + * An empty IO Statistics implementation for classes which always + * want to return a non-null set of statistics. + */ +final class EmptyIOStatistics extends AbstractIOStatisticsImpl { + + /** + * The sole instance of this class. 
+ */ + private static final EmptyIOStatistics INSTANCE = new EmptyIOStatistics(); + + private EmptyIOStatistics() { + } + + @Override + public Map counters() { + return emptyMap(); + } + + @Override + public Map gauges() { + return emptyMap(); + } + + @Override + public Map minimums() { + return emptyMap(); + } + + @Override + public Map maximums() { + return emptyMap(); + } + + @Override + public Map meanStatistics() { + return emptyMap(); + } + + /** + * Get the single instance of this class. + * @return a shared, empty instance. + */ + public static IOStatistics getInstance() { + return INSTANCE; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EvaluatingStatisticsMap.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EvaluatingStatisticsMap.java new file mode 100644 index 0000000000000..e4680f2d81fa0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/EvaluatingStatisticsMap.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.io.Serializable; +import java.util.Collection; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * A map of functions which can be invoked to dynamically + * create the value of an entry. + * @param type of entry value. + */ +final class EvaluatingStatisticsMap implements + Map { + + /** + * Functions to invoke when evaluating keys. + */ + private final Map> evaluators + = new ConcurrentHashMap<>(); + + /** + * Function to use when copying map values. + */ + private final Function copyFn; + + /** + * Construct with the copy function being simple passthrough. + */ + EvaluatingStatisticsMap() { + this(IOStatisticsBinding::passthroughFn); + } + + /** + * Construct with the copy function being that supplied in. + * @param copyFn copy function. + */ + EvaluatingStatisticsMap(final Function copyFn) { + this.copyFn = copyFn; + } + + /** + * add a mapping of a key to a function. 
+ * @param key the key + * @param eval the evaluator + */ + void addFunction(String key, Function eval) { + evaluators.put(key, eval); + } + + @Override + public int size() { + return evaluators.size(); + } + + @Override + public boolean isEmpty() { + return evaluators.isEmpty(); + } + + @Override + public boolean containsKey(final Object key) { + return evaluators.containsKey(key); + } + + @Override + public boolean containsValue(final Object value) { + throw new UnsupportedOperationException(); + } + + @Override + public E get(final Object key) { + Function fn = evaluators.get(key); + return fn != null + ? fn.apply((String) key) + : null; + } + + @Override + public E put(final String key, final E value) { + throw new UnsupportedOperationException(); + } + + @Override + public E remove(final Object key) { + throw new UnsupportedOperationException(); + } + + @Override + public void putAll(final Map m) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public Set keySet() { + return evaluators.keySet(); + } + + /** + * Evaluate all the entries and provide a list of the results. + * + * This is not a snapshot, so if the evaluators actually return + * references to mutable objects (e.g. a MeanStatistic instance) + * then that value may still change. + * @return the current list of evaluated results. + */ + @Override + public Collection values() { + Set>> evalEntries = + evaluators.entrySet(); + return evalEntries.parallelStream().map((e) -> + e.getValue().apply(e.getKey())) + .collect(Collectors.toList()); + } + + /** + * Take a snapshot. + * @return a map snapshot. + */ + public Map snapshot() { + return IOStatisticsBinding.snapshotMap(this, copyFn); + } + + /** + * Creating the entry set forces an evaluation of the functions. + * + * This is not a snapshot, so if the evaluators actually return + * references to mutable objects (e.g. a MeanStatistic instance) + * then that value may still change. + * + * The evaluation may be parallelized. + * @return an evaluated set of values + */ + @Override + public synchronized Set> entrySet() { + Set>> evalEntries = + evaluators.entrySet(); + Set> r = evalEntries.parallelStream().map((e) -> + new EntryImpl<>(e.getKey(), e.getValue().apply(e.getKey()))) + .collect(Collectors.toSet()); + return r; + } + + /** + * Simple entry. + * @param entry type + */ + private static final class EntryImpl implements Entry { + + private String key; + + private E value; + + private EntryImpl(final String key, final E value) { + this.key = key; + this.value = value; + } + + @Override + public String getKey() { + return key; + } + + @Override + public E getValue() { + return value; + } + + @Override + public E setValue(final E val) { + this.value = val; + return val; + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java new file mode 100644 index 0000000000000..c3507dbc73ef4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsBinding.java @@ -0,0 +1,616 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.Serializable; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiFunction; +import java.util.function.Function; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.MeanStatistic; +import org.apache.hadoop.util.functional.CallableRaisingIOE; +import org.apache.hadoop.util.functional.ConsumerRaisingIOE; +import org.apache.hadoop.util.functional.FunctionRaisingIOE; +import org.apache.hadoop.util.functional.InvocationRaisingIOE; + +import static org.apache.hadoop.fs.statistics.IOStatistics.MIN_UNSET_VALUE; +import static org.apache.hadoop.fs.statistics.impl.StubDurationTracker.STUB_DURATION_TRACKER; + +/** + * Support for implementing IOStatistics interfaces. + */ +public final class IOStatisticsBinding { + + /** Pattern used for each entry. */ + public static final String ENTRY_PATTERN = "(%s=%s)"; + + /** String to return when a source is null. */ + @VisibleForTesting + public static final String NULL_SOURCE = "()"; + + private IOStatisticsBinding() { + } + + /** + * Create IOStatistics from a storage statistics instance. + * + * This will be updated as the storage statistics change. + * @param storageStatistics source data. + * @return an IO statistics source. + */ + public static IOStatistics fromStorageStatistics( + StorageStatistics storageStatistics) { + DynamicIOStatisticsBuilder builder = dynamicIOStatistics(); + Iterator it = storageStatistics + .getLongStatistics(); + while (it.hasNext()) { + StorageStatistics.LongStatistic next = it.next(); + builder.withLongFunctionCounter(next.getName(), + k -> storageStatistics.getLong(k)); + } + return builder.build(); + } + + /** + * Create a builder for dynamic IO Statistics. + * @return a builder to be completed. + */ + public static DynamicIOStatisticsBuilder dynamicIOStatistics() { + return new DynamicIOStatisticsBuilder(); + } + + /** + * Get the shared instance of the immutable empty statistics + * object. + * @return an empty statistics object. + */ + public static IOStatistics emptyStatistics() { + return EmptyIOStatistics.getInstance(); + } + + /** + * Take an IOStatistics instance and wrap it in a source. + * @param statistics statistics. 
+ * @return a source which will return the values + */ + public static IOStatisticsSource wrap(IOStatistics statistics) { + return new SourceWrappedStatistics(statistics); + } + + /** + * Create a builder for an {@link IOStatisticsStore}. + * + * @return a builder instance. + */ + public static IOStatisticsStoreBuilder iostatisticsStore() { + return new IOStatisticsStoreBuilderImpl(); + } + + /** + * Convert an entry to the string format used in logging. + * + * @param entry entry to evaluate + * @param entry type + * @return formatted string + */ + public static String entryToString( + final Map.Entry entry) { + return entryToString(entry.getKey(), entry.getValue()); + } + + /** + * Convert entry values to the string format used in logging. + * + * @param name statistic name + * @param value stat value + * @return formatted string + */ + public static String entryToString( + final String name, final E value) { + return String.format( + ENTRY_PATTERN, + name, + value); + } + + /** + * Copy into the dest map all the source entries. + * The destination is cleared first. + * @param entry type + * @param dest destination of the copy + * @param source source + * @param copyFn function to copy entries + * @return the destination. + */ + private static Map copyMap( + Map dest, + Map source, + Function copyFn) { + // we have to clone the values so that they aren't + // bound to the original values + dest.clear(); + source.entrySet() + .forEach(entry -> + dest.put(entry.getKey(), copyFn.apply(entry.getValue()))); + return dest; + } + + /** + * A passthrough copy operation suitable for immutable + * types, including numbers. + * @param src source object + * @return the source object + */ + public static E passthroughFn(E src) { + return src; + } + + /** + * Take a snapshot of a supplied map, where the copy option simply + * uses the existing value. + * + * For this to be safe, the map must refer to immutable objects. + * @param source source map + * @param type of values. + * @return a new map referencing the same values. + */ + public static Map snapshotMap( + Map source) { + return snapshotMap(source, + IOStatisticsBinding::passthroughFn); + } + + /** + * Take a snapshot of a supplied map, using the copy function + * to replicate the source values. + * @param source source map + * @param copyFn function to copy the value + * @param type of values. + * @return a concurrent hash map referencing the same values. + */ + public static + ConcurrentHashMap snapshotMap( + Map source, + Function copyFn) { + ConcurrentHashMap dest = new ConcurrentHashMap<>(); + copyMap(dest, source, copyFn); + return dest; + } + + /** + * Aggregate two maps so that the destination. + * @param type of values + * @param dest destination map. + * @param other other map + * @param aggregateFn function to aggregate the values. + * @param copyFn function to copy the value + */ + public static void aggregateMaps( + Map dest, + Map other, + BiFunction aggregateFn, + Function copyFn) { + // scan through the other hand map; copy + // any values not in the left map, + // aggregate those for which there is already + // an entry + other.entrySet().forEach(entry -> { + String key = entry.getKey(); + E rVal = entry.getValue(); + E lVal = dest.get(key); + if (lVal == null) { + dest.put(key, copyFn.apply(rVal)); + } else { + dest.put(key, aggregateFn.apply(lVal, rVal)); + } + }); + } + + /** + * Aggregate two counters. 
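A short worked example of the aggregation rules defined in this class (values chosen purely for illustration):

    aggregateCounters(5L, -2L);              // 5: negative inputs count as 0
    aggregateGauges(5L, -2L);                // 3: gauges are simply added
    aggregateMinimums(MIN_UNSET_VALUE, 7L);  // 7: unset minimums are ignored
    aggregateMaximums(3L, 9L);               // 9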
+ * @param l left value + * @param r right value + * @return the aggregate value + */ + public static Long aggregateCounters(Long l, Long r) { + return Math.max(l, 0) + Math.max(r, 0); + } + + /** + * Add two gauges. + * @param l left value + * @param r right value + * @return aggregate value + */ + public static Long aggregateGauges(Long l, Long r) { + return l + r; + } + + + /** + * Aggregate two minimum values. + * @param l left + * @param r right + * @return the new minimum. + */ + public static Long aggregateMinimums(Long l, Long r) { + if (l == MIN_UNSET_VALUE) { + return r; + } else if (r == MIN_UNSET_VALUE) { + return l; + } else { + return Math.min(l, r); + } + } + + /** + * Aggregate two maximum values. + * @param l left + * @param r right + * @return the new minimum. + */ + public static Long aggregateMaximums(Long l, Long r) { + if (l == MIN_UNSET_VALUE) { + return r; + } else if (r == MIN_UNSET_VALUE) { + return l; + } else { + return Math.max(l, r); + } + } + + /** + * Aggregate the mean statistics. + * This returns a new instance. + * @param l left value + * @param r right value + * @return aggregate value + */ + public static MeanStatistic aggregateMeanStatistics( + MeanStatistic l, MeanStatistic r) { + MeanStatistic res = l.copy(); + res.add(r); + return res; + } + + /** + * Update a maximum value tracked in an atomic long. + * This is thread safe -it uses compareAndSet to ensure + * that Thread T1 whose sample is greater than the current + * value never overwrites an update from thread T2 whose + * sample was also higher -and which completed first. + * @param dest destination for all changes. + * @param sample sample to update. + */ + public static void maybeUpdateMaximum(AtomicLong dest, long sample) { + boolean done; + do { + long current = dest.get(); + if (sample > current) { + done = dest.compareAndSet(current, sample); + } else { + done = true; + } + } while (!done); + } + + /** + * Update a maximum value tracked in an atomic long. + * This is thread safe -it uses compareAndSet to ensure + * that Thread T1 whose sample is greater than the current + * value never overwrites an update from thread T2 whose + * sample was also higher -and which completed first. + * @param dest destination for all changes. + * @param sample sample to update. + */ + public static void maybeUpdateMinimum(AtomicLong dest, long sample) { + boolean done; + do { + long current = dest.get(); + if (current == MIN_UNSET_VALUE || sample < current) { + done = dest.compareAndSet(current, sample); + } else { + done = true; + } + } while (!done); + } + + /** + * Given an IOException raising function/lambda expression, + * return a new one which wraps the inner and tracks + * the duration of the operation, including whether + * it passes/fails. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param inputFn input function + * @param type of argument to the input function. + * @param return type. + * @return a new function which tracks duration and failure. + */ + public static FunctionRaisingIOE trackFunctionDuration( + @Nullable DurationTrackerFactory factory, + String statistic, + FunctionRaisingIOE inputFn) { + return (x) -> { + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. 
+ DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + return inputFn.apply(x); + } catch (IOException | RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after the catch() call would have + // set the failed flag. + tracker.close(); + } + }; + } + + /** + * Given a java function/lambda expression, + * return a new one which wraps the inner and tracks + * the duration of the operation, including whether + * it passes/fails. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param inputFn input function + * @param type of argument to the input function. + * @param return type. + * @return a new function which tracks duration and failure. + */ + public static Function trackJavaFunctionDuration( + @Nullable DurationTrackerFactory factory, + String statistic, + Function inputFn) { + return (x) -> { + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. + DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + return inputFn.apply(x); + } catch (RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after the catch() call would have + // set the failed flag. + tracker.close(); + } + }; + } + + /** + * Given an IOException raising callable/lambda expression, + * execute it and update the relevant statistic. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param input input callable. + * @param return type. + * @return the result of the operation. + */ + public static B trackDuration( + DurationTrackerFactory factory, + String statistic, + CallableRaisingIOE input) throws IOException { + return trackDurationOfOperation(factory, statistic, input).apply(); + } + + /** + * Given an IOException raising callable/lambda expression, + * execute it and update the relevant statistic. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param input input callable. + */ + public static void trackDurationOfInvocation( + DurationTrackerFactory factory, + String statistic, + InvocationRaisingIOE input) throws IOException { + + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. + DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + input.apply(); + } catch (IOException | RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after the catch() call would have + // set the failed flag. + tracker.close(); + } + } + + /** + * Given an IOException raising callable/lambda expression, + * return a new one which wraps the inner and tracks + * the duration of the operation, including whether + * it passes/fails. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param input input callable. + * @param return type. + * @return a new callable which tracks duration and failure. 
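// Illustrative sketch, not part of this patch: timing IO lambdas with
// trackDuration() and trackDurationOfInvocation(). The keys "op_read" and
// "op_delete" are hypothetical; they only record anything because they are
// registered via withDurationTracking() first.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration;
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfInvocation;

public class DurationTrackingSketch {
  public static byte[] timedRead(Path source) throws IOException {
    IOStatisticsStore store = iostatisticsStore()
        .withDurationTracking("op_read", "op_delete")
        .build();
    // the lambda's result is returned; duration and success/failure are recorded
    byte[] data = trackDuration(store, "op_read", () -> Files.readAllBytes(source));
    // void variant for operations without a return value of interest
    trackDurationOfInvocation(store, "op_delete", () -> Files.deleteIfExists(source));
    return data;
  }
}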
+ */ + public static CallableRaisingIOE trackDurationOfOperation( + @Nullable DurationTrackerFactory factory, + String statistic, + CallableRaisingIOE input) { + return () -> { + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. + DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + return input.apply(); + } catch (IOException | RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after the catch() call would have + // set the failed flag. + tracker.close(); + } + }; + } + + /** + * Given an IOException raising Consumer, + * return a new one which wraps the inner and tracks + * the duration of the operation, including whether + * it passes/fails. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param input input callable. + * @param return type. + * @return a new consumer which tracks duration and failure. + */ + public static ConsumerRaisingIOE trackDurationConsumer( + @Nullable DurationTrackerFactory factory, + String statistic, + ConsumerRaisingIOE input) { + return (B t) -> { + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. + DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + input.accept(t); + } catch (IOException | RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after the catch() call would have + // set the failed flag. + tracker.close(); + } + }; + } + + /** + * Given a callable/lambda expression, + * return a new one which wraps the inner and tracks + * the duration of the operation, including whether + * it passes/fails. + * @param factory factory of duration trackers + * @param statistic statistic key + * @param input input callable. + * @param return type. + * @return a new callable which tracks duration and failure. + */ + public static Callable trackDurationOfCallable( + @Nullable DurationTrackerFactory factory, + String statistic, + Callable input) { + return () -> { + // create the tracker outside try-with-resources so + // that failures can be set in the catcher. + DurationTracker tracker = createTracker(factory, statistic); + try { + // exec the input function and return its value + return input.call(); + } catch (RuntimeException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after any catch() call will have + // set the failed flag. + tracker.close(); + } + }; + } + + /** + * Create the tracker. If the factory is null, a stub + * tracker is returned. + * @param factory tracker factory + * @param statistic statistic to track + * @return a duration tracker. + */ + private static DurationTracker createTracker( + @Nullable final DurationTrackerFactory factory, + final String statistic) { + return factory != null + ? factory.trackDuration(statistic) + : STUB_DURATION_TRACKER; + } + + /** + * Create a DurationTrackerFactory which aggregates the tracking + * of two other factories. 
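// Illustrative sketch, not part of this patch: wrapping a java.util.concurrent
// Callable so that its execution time is recorded before it is handed to an
// executor. The key "op_upload" is a hypothetical example.
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfCallable;

public class CallableTrackingSketch {
  public static Future<String> submitTracked(
      ExecutorService pool,
      IOStatisticsStore store,
      Callable<String> work) {
    Callable<String> tracked = trackDurationOfCallable(store, "op_upload", work);
    return pool.submit(tracked);
  }
}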
+ * @param first first tracker factory + * @param second second tracker factory + * @return a factory + */ + public static DurationTrackerFactory pairedTrackerFactory( + final DurationTrackerFactory first, + final DurationTrackerFactory second) { + return new PairedDurationTrackerFactory(first, second); + } + + /** + * Publish the IOStatistics as a set of storage statistics. + * This is dynamic. + * @param name storage statistics name. + * @param scheme FS scheme; may be null. + * @param source IOStatistics source. + * @return a dynamic storage statistics object. + */ + public static StorageStatistics publishAsStorageStatistics( + String name, String scheme, IOStatistics source) { + return new StorageStatisticsFromIOStatistics(name, scheme, source); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStore.java new file mode 100644 index 0000000000000..1b4139e463a9e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStore.java @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.time.Duration; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsAggregator; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.MeanStatistic; + +/** + * Interface of an IOStatistics store intended for + * use in classes which track statistics for reporting. + */ +public interface IOStatisticsStore extends IOStatistics, + IOStatisticsAggregator, + DurationTrackerFactory { + + /** + * Increment a counter by one. + * + * No-op if the counter is unknown. + * @param key statistics key + * @return old value or, if the counter is unknown: 0 + */ + default long incrementCounter(String key) { + return incrementCounter(key, 1); + } + + /** + * Increment a counter. + * + * No-op if the counter is unknown. + * If the value is negative, it is ignored. + * @param key statistics key + * @param value value to increment + * @return the updated value or, if the counter is unknown: 0 + */ + long incrementCounter(String key, long value); + + /** + * Set a counter. + * + * No-op if the counter is unknown. + * @param key statistics key + * @param value value to set + */ + void setCounter(String key, long value); + + /** + * Set a gauge. + * + * No-op if the gauge is unknown. + * @param key statistics key + * @param value value to set + */ + void setGauge(String key, long value); + + /** + * Increment a gauge. 
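// Illustrative sketch, not part of this patch: the basic mutation operations
// declared by IOStatisticsStore below. All statistic names here ("bytes_read",
// "active_readers", "read_latency.min", "read_latency.max") are hypothetical.
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;

public class StoreMutationSketch {
  public static IOStatisticsStore sample() {
    IOStatisticsStore store = iostatisticsStore()
        .withCounters("bytes_read")
        .withGauges("active_readers")
        .withMinimums("read_latency.min")
        .withMaximums("read_latency.max")
        .build();
    store.incrementCounter("bytes_read", 4096);
    store.incrementGauge("active_readers", 1);
    store.addMinimumSample("read_latency.min", 12);  // only lowers the value
    store.addMaximumSample("read_latency.max", 12);  // only raises the value
    return store;
  }
}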
+ * <p>
+ * No-op if the gauge is unknown.
+ * <p>
    + * @param key statistics key + * @param value value to increment + * @return new value or 0 if the key is unknown + */ + long incrementGauge(String key, long value); + + /** + * Set a maximum. + * No-op if the maximum is unknown. + * @param key statistics key + * @param value value to set + */ + void setMaximum(String key, long value); + + /** + * Increment a maximum. + *
<p>
+ * No-op if the maximum is unknown.
+ * <p>
    + * @param key statistics key + * @param value value to increment + * @return new value or 0 if the key is unknown + */ + long incrementMaximum(String key, long value); + + /** + * Set a minimum. + *
<p>
+ * No-op if the minimum is unknown.
+ * <p>
    + * @param key statistics key + * @param value value to set + */ + void setMinimum(String key, long value); + + /** + * Increment a minimum. + *
<p>
+ * No-op if the minimum is unknown.
+ * <p>
    + * @param key statistics key + * @param value value to increment + * @return new value or 0 if the key is unknown + */ + long incrementMinimum(String key, long value); + + /** + * Add a minimum sample: if less than the current value, + * updates the value. + *
<p>
+ * No-op if the minimum is unknown.
+ * <p>
    + * @param key statistics key + * @param value sample value + */ + void addMinimumSample(String key, long value); + + /** + * Add a maximum sample: if greater than the current value, + * updates the value. + *
<p>
+ * No-op if the key is unknown.
+ * <p>
    + * @param key statistics key + * @param value sample value + */ + void addMaximumSample(String key, long value); + + /** + * Set a mean statistic to a given value. + *
<p>
+ * No-op if the key is unknown.
+ * <p>
    + * @param key statistic key + * @param value new value. + */ + void setMeanStatistic(String key, MeanStatistic value); + + /** + * Add a sample to the mean statistics. + *
<p>
+ * No-op if the key is unknown.
+ * <p>
    + * @param key key + * @param value sample value. + */ + void addMeanStatisticSample(String key, long value); + + /** + * Reset all statistics. + * Unsynchronized. + */ + void reset(); + + /** + * Get a reference to the atomic instance providing the + * value for a specific counter. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + AtomicLong getCounterReference(String key); + + /** + * Get a reference to the atomic instance providing the + * value for a specific maximum. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + AtomicLong getMaximumReference(String key); + + /** + * Get a reference to the atomic instance providing the + * value for a specific minimum. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + AtomicLong getMinimumReference(String key); + + /** + * Get a reference to the atomic instance providing the + * value for a specific gauge. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + AtomicLong getGaugeReference(String key); + + /** + * Get a reference to the atomic instance providing the + * value for a specific meanStatistic. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + MeanStatistic getMeanStatistic(String key); + + /** + * Add a duration to the min/mean/max statistics, using the + * given prefix and adding a suffix for each specific value. + * + * The update is not-atomic, even though each individual statistic + * is updated thread-safely. If two threads update the values + * simultaneously, at the end of each operation the state will + * be correct. It is only during the sequence that the statistics + * may be observably inconsistent. + * @param prefix statistic prefix + * @param durationMillis duration in milliseconds. + */ + void addTimedOperation(String prefix, long durationMillis); + + /** + * Add a duration to the min/mean/max statistics, using the + * given prefix and adding a suffix for each specific value.; + * increment tha counter whose name == prefix. + * + * If any of the statistics are not registered, that part of + * the sequence will be omitted -the rest will proceed. + * + * The update is not-atomic, even though each individual statistic + * is updated thread-safely. If two threads update the values + * simultaneously, at the end of each operation the state will + * be correct. It is only during the sequence that the statistics + * may be observably inconsistent. 
+ * @param prefix statistic prefix + * @param duration duration + */ + void addTimedOperation(String prefix, Duration duration); + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilder.java new file mode 100644 index 0000000000000..d94a8389b7ff8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilder.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +/** + * Builder of the {@link IOStatisticsStore} implementation. + */ +public interface IOStatisticsStoreBuilder { + + /** + * Declare a varargs list of counters to add. + * @param keys names of statistics. + * @return this builder. + */ + IOStatisticsStoreBuilder withCounters(String... keys); + + /** + * Declare a varargs list of gauges to add. + * @param keys names of statistics. + * @return this builder. + */ + IOStatisticsStoreBuilder withGauges(String... keys); + + /** + * Declare a varargs list of maximums to add. + * @param keys names of statistics. + * @return this builder. + */ + IOStatisticsStoreBuilder withMaximums(String... keys); + + /** + * Declare a varargs list of minimums to add. + * @param keys names of statistics. + * @return this builder. + */ + IOStatisticsStoreBuilder withMinimums(String... keys); + + /** + * Declare a varargs list of means to add. + * @param keys names of statistics. + * @return this builder. + */ + IOStatisticsStoreBuilder withMeanStatistics(String... keys); + + /** + * Add a statistic in the counter, min, max and mean maps for each + * declared statistic prefix. + * @param prefixes prefixes for the stats. + * @return this + */ + IOStatisticsStoreBuilder withDurationTracking( + String... prefixes); + + /** + * Build the collector. + * @return a new collector. + */ + IOStatisticsStore build(); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilderImpl.java new file mode 100644 index 0000000000000..0562271db3ef8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreBuilderImpl.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_FAILURES; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MAX; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MEAN; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MIN; + +/** + * Builder for an IOStatistics store.. + */ +final class IOStatisticsStoreBuilderImpl implements + IOStatisticsStoreBuilder { + + private final List counters = new ArrayList<>(); + + private final List gauges = new ArrayList<>(); + + private final List minimums = new ArrayList<>(); + + private final List maximums = new ArrayList<>(); + + private final List meanStatistics = new ArrayList<>(); + + @Override + public IOStatisticsStoreBuilderImpl withCounters(final String... keys) { + counters.addAll(Arrays.asList(keys)); + return this; + } + + @Override + public IOStatisticsStoreBuilderImpl withGauges(final String... keys) { + gauges.addAll(Arrays.asList(keys)); + return this; + } + + @Override + public IOStatisticsStoreBuilderImpl withMaximums(final String... keys) { + maximums.addAll(Arrays.asList(keys)); + return this; + } + + @Override + public IOStatisticsStoreBuilderImpl withMinimums(final String... keys) { + minimums.addAll(Arrays.asList(keys)); + return this; + } + + @Override + public IOStatisticsStoreBuilderImpl withMeanStatistics( + final String... keys) { + meanStatistics.addAll(Arrays.asList(keys)); + return this; + } + + @Override + public IOStatisticsStoreBuilderImpl withDurationTracking( + final String... prefixes) { + for (String p : prefixes) { + withCounters(p, p + SUFFIX_FAILURES); + withMinimums( + p + SUFFIX_MIN, + p + SUFFIX_FAILURES + SUFFIX_MIN); + withMaximums( + p + SUFFIX_MAX, + p + SUFFIX_FAILURES + SUFFIX_MAX); + withMeanStatistics( + p + SUFFIX_MEAN, + p + SUFFIX_FAILURES + SUFFIX_MEAN); + } + return this; + } + + @Override + public IOStatisticsStore build() { + return new IOStatisticsStoreImpl(counters, gauges, minimums, + maximums, meanStatistics); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreImpl.java new file mode 100644 index 0000000000000..0471703b3b040 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/IOStatisticsStoreImpl.java @@ -0,0 +1,469 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
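// Illustrative sketch, not part of this patch: chaining the builder methods
// implemented above. withDurationTracking() expands each prefix into a
// counter, a failure counter and min/max/mean entries. All names here are
// hypothetical examples.
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;

public class BuilderSketch {
  public static IOStatisticsStore build() {
    return iostatisticsStore()
        .withCounters("bytes_written")
        .withGauges("queue_depth")
        .withMinimums("block_size.min")
        .withMaximums("block_size.max")
        .withMeanStatistics("block_size.mean")
        .withDurationTracking("op_commit")
        .build();
  }
}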
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.MeanStatistic; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTracker; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MAX; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MEAN; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MIN; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.aggregateMaximums; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.aggregateMinimums; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.dynamicIOStatistics; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.maybeUpdateMaximum; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.maybeUpdateMinimum; + +/** + * Implementation of {@link IOStatisticsStore}. + *
<p>
+ * A ConcurrentHashMap of each set of statistics is created;
+ * the AtomicLong/MeanStatistic entries are fetched as required.
+ * When the statistics are updated, the referenced objects
+ * are updated rather than new values set in the map.
    + */ +final class IOStatisticsStoreImpl extends WrappedIOStatistics + implements IOStatisticsStore { + + /** + * Log changes at debug. + * Noisy, but occasionally useful. + */ + private static final Logger LOG = + LoggerFactory.getLogger(IOStatisticsStoreImpl.class); + + /** All the counters are atomic longs. */ + private final Map counterMap = new ConcurrentHashMap<>(); + + /** All the gauges are atomic longs. */ + private final Map gaugeMap = new ConcurrentHashMap<>(); + + /** All the minimum values are atomic longs. */ + private final Map minimumMap = new ConcurrentHashMap<>(); + + /** All the maximum values are atomic longs. */ + private final Map maximumMap = new ConcurrentHashMap<>(); + + /** + * The mean statistics. + * Relies on the MeanStatistic operations being synchronized. + */ + private final Map meanStatisticMap + = new ConcurrentHashMap<>(); + + /** + * Constructor invoked via the builder. + * @param counters keys to use for the counter statistics. + * @param gauges names of gauges + * @param minimums names of minimums + * @param maximums names of maximums + * @param meanStatistics names of mean statistics. + */ + IOStatisticsStoreImpl( + final List counters, + final List gauges, + final List minimums, + final List maximums, + final List meanStatistics) { + // initially create the superclass with no wrapped mapping; + super(null); + + // now construct a dynamic statistics source mapping to + // the various counters, gauges etc dynamically created + // into maps + DynamicIOStatisticsBuilder builder = dynamicIOStatistics(); + if (counters != null) { + for (String key : counters) { + AtomicLong counter = new AtomicLong(); + counterMap.put(key, counter); + builder.withAtomicLongCounter(key, counter); + } + } + if (gauges != null) { + for (String key : gauges) { + AtomicLong gauge = new AtomicLong(); + gaugeMap.put(key, gauge); + builder.withAtomicLongGauge(key, gauge); + } + } + if (maximums != null) { + for (String key : maximums) { + AtomicLong maximum = new AtomicLong(MAX_UNSET_VALUE); + maximumMap.put(key, maximum); + builder.withAtomicLongMaximum(key, maximum); + } + } + if (minimums != null) { + for (String key : minimums) { + AtomicLong minimum = new AtomicLong(MIN_UNSET_VALUE); + minimumMap.put(key, minimum); + builder.withAtomicLongMinimum(key, minimum); + } + } + if (meanStatistics != null) { + for (String key : meanStatistics) { + meanStatisticMap.put(key, new MeanStatistic()); + builder.withMeanStatisticFunction(key, k -> meanStatisticMap.get(k)); + } + } + setWrapped(builder.build()); + } + + /** + * Set an atomic long to a value. + * @param aLong atomic long; may be null + * @param value value to set to + */ + private void setAtomicLong(final AtomicLong aLong, final long value) { + if (aLong != null) { + aLong.set(value); + } + } + + /** + * increment an atomic long and return its value; + * null long is no-op returning 0. + * @param aLong atomic long; may be null + * param increment amount to increment; negative for a decrement + * @return final value or 0 if the long is null + */ + private long incAtomicLong(final AtomicLong aLong, + final long increment) { + if (aLong != null) { + // optimization: zero is a get rather than addAndGet() + return increment != 0 + ? 
aLong.addAndGet(increment) + : aLong.get(); + } else { + return 0; + } + } + + @Override + public void setCounter(final String key, final long value) { + setAtomicLong(counterMap.get(key), value); + LOG.debug("Setting counter {} to {}", key, value); + } + + @Override + public long incrementCounter(final String key, final long value) { + AtomicLong counter = counterMap.get(key); + if (counter == null) { + LOG.debug("Ignoring counter increment for unknown counter {}", + key); + return 0; + } + if (value < 0) { + LOG.debug("Ignoring negative increment value {} for counter {}", + value, key); + // returns old value + return counter.get(); + } else { + long l = incAtomicLong(counter, value); + LOG.debug("Incrementing counter {} by {} with final value {}", + key, value, l); + return l; + } + } + + @Override + public void setMaximum(final String key, final long value) { + setAtomicLong(maximumMap.get(key), value); + } + + @Override + public long incrementMaximum(final String key, final long value) { + return incAtomicLong(maximumMap.get(key), value); + } + + @Override + public void setMinimum(final String key, final long value) { + setAtomicLong(minimumMap.get(key), value); + } + + @Override + public long incrementMinimum(final String key, final long value) { + return incAtomicLong(minimumMap.get(key), value); + } + + @Override + public void addMinimumSample(final String key, final long value) { + AtomicLong min = minimumMap.get(key); + if (min != null) { + maybeUpdateMinimum(min, value); + } + } + + @Override + public void addMaximumSample(final String key, final long value) { + AtomicLong max = maximumMap.get(key); + if (max != null) { + maybeUpdateMaximum(max, value); + } + } + + @Override + public void setGauge(final String key, final long value) { + setAtomicLong(gaugeMap.get(key), value); + } + + @Override + public long incrementGauge(final String key, final long value) { + return incAtomicLong(gaugeMap.get(key), value); + } + + @Override + public void setMeanStatistic(final String key, final MeanStatistic value) { + final MeanStatistic ref = meanStatisticMap.get(key); + if (ref != null) { + ref.set(value); + } + } + + @Override + public void addMeanStatisticSample(final String key, final long value) { + final MeanStatistic ref = meanStatisticMap.get(key); + if (ref != null) { + ref.addSample(value); + } + } + + /** + * Reset all statistics. + */ + @Override + public synchronized void reset() { + counterMap.values().forEach(a -> a.set(0)); + gaugeMap.values().forEach(a -> a.set(0)); + minimumMap.values().forEach(a -> a.set(0)); + maximumMap.values().forEach(a -> a.set(0)); + meanStatisticMap.values().forEach(a -> a.clear()); + } + + /** + * Aggregate those statistics which the store is tracking; + * ignore the rest. + * + * @param source statistics; may be null + * @return true if a statistics reference was supplied/aggregated. + */ + @Override + public synchronized boolean aggregate( + @Nullable final IOStatistics source) { + + if (source == null) { + return false; + } + // counters: addition + Map sourceCounters = source.counters(); + counterMap.entrySet(). 
+ forEach(e -> { + Long sourceValue = lookupQuietly(sourceCounters, e.getKey()); + if (sourceValue != null) { + e.getValue().addAndGet(sourceValue); + } + }); + // gauge: add positive values only + Map sourceGauges = source.gauges(); + gaugeMap.entrySet().forEach(e -> { + Long sourceGauge = lookupQuietly(sourceGauges, e.getKey()); + if (sourceGauge != null && sourceGauge > 0) { + e.getValue().addAndGet(sourceGauge); + } + }); + // min: min of current and source + Map sourceMinimums = source.minimums(); + minimumMap.entrySet().forEach(e -> { + Long sourceValue = lookupQuietly(sourceMinimums, e.getKey()); + if (sourceValue != null) { + AtomicLong dest = e.getValue(); + dest.set(aggregateMaximums(dest.get(), sourceValue)); + dest.set(aggregateMinimums(dest.get(), sourceValue)); + } + }); + // max: max of current and source + Map sourceMaximums = source.maximums(); + maximumMap.entrySet().forEach(e -> { + Long sourceValue = lookupQuietly(sourceMaximums, e.getKey()); + if (sourceValue != null) { + AtomicLong dest = e.getValue(); + dest.set(aggregateMaximums(dest.get(), sourceValue)); + } + }); + // the most complex + Map sourceMeans = source.meanStatistics(); + meanStatisticMap.entrySet().forEach(e -> { + MeanStatistic current = e.getValue(); + MeanStatistic sourceValue = lookupQuietly( + sourceMeans, e.getKey()); + if (sourceValue != null) { + current.add(sourceValue); + } + }); + return true; + } + + /** + * Get a reference to the map type providing the + * value for a specific key, raising an exception if + * there is no entry for that key. + * @param type of map/return type. + * @param map map to look up + * @param key statistic name + * @return the value + * @throws NullPointerException if there is no entry of that name + */ + private static T lookup(final Map map, String key) { + T val = map.get(key); + requireNonNull(val, () -> ("unknown statistic " + key)); + return val; + } + + /** + * Get a reference to the map type providing the + * value for a specific key, returning null if it not found. + * @param type of map/return type. + * @param map map to look up + * @param key statistic name + * @return the value + */ + private static T lookupQuietly(final Map map, String key) { + return map.get(key); + } + + /** + * Get a reference to the atomic instance providing the + * value for a specific counter. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + @Override + public AtomicLong getCounterReference(String key) { + return lookup(counterMap, key); + } + + /** + * Get a reference to the atomic instance providing the + * value for a specific maximum. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + @Override + public AtomicLong getMaximumReference(String key) { + return lookup(maximumMap, key); + } + + /** + * Get a reference to the atomic instance providing the + * value for a specific minimum. This is useful if + * the value is passed around. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + @Override + public AtomicLong getMinimumReference(String key) { + return lookup(minimumMap, key); + } + + /** + * Get a reference to the atomic instance providing the + * value for a specific gauge. This is useful if + * the value is passed around. 
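// Illustrative sketch, not part of this patch: merging the statistics of a
// per-stream source into a longer-lived store with the aggregate() method
// implemented above (counters add, gauges add positive values, minimums and
// maximums take min/max, mean statistics merge their samples).
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

public class AggregationSketch {
  public static boolean merge(
      IOStatisticsStore fsStatistics,
      IOStatistics streamStatistics) {
    // returns false if the source is null; statistics the store does not track are ignored
    return fsStatistics.aggregate(streamStatistics);
  }
}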
+ * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + @Override + public AtomicLong getGaugeReference(String key) { + return lookup(gaugeMap, key); + } + + /** + * Get a mean statistic. + * @param key statistic name + * @return the reference + * @throws NullPointerException if there is no entry of that name + */ + @Override + public MeanStatistic getMeanStatistic(String key) { + return lookup(meanStatisticMap, key); + } + + /** + * Add a duration to the min/mean/max statistics, using the + * given prefix and adding a suffix for each specific value. + *
<p>
+ * The update is non-atomic, even though each individual statistic
+ * is updated thread-safely. If two threads update the values
+ * simultaneously, at the end of each operation the state will
+ * be correct. It is only during the sequence that the statistics
+ * may be observably inconsistent.
+ * <p>
    + * @param prefix statistic prefix + * @param durationMillis duration in milliseconds. + */ + @Override + public void addTimedOperation(String prefix, long durationMillis) { + addMeanStatisticSample(prefix + SUFFIX_MEAN, durationMillis); + addMinimumSample(prefix + SUFFIX_MIN, durationMillis); + addMaximumSample(prefix + SUFFIX_MAX, durationMillis); + } + + @Override + public void addTimedOperation(String prefix, Duration duration) { + addTimedOperation(prefix, duration.toMillis()); + } + + /** + * If the store is tracking the given key, return the + * duration tracker for it. If not tracked, return the + * stub tracker. + * @param key statistic key prefix + * @param count #of times to increment the matching counter in this + * operation. + * @return a tracker. + */ + @Override + public DurationTracker trackDuration(final String key, final long count) { + if (counterMap.containsKey(key)) { + return new StatisticDurationTracker(this, key, count); + } else { + return stubDurationTracker(); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/PairedDurationTrackerFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/PairedDurationTrackerFactory.java new file mode 100644 index 0000000000000..33b13f78418a9 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/PairedDurationTrackerFactory.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.time.Duration; + +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; + +/** + * A duration tracker factory which aggregates two other trackers + * to have the same lifecycle. + * + * This is to ease having instance-level tracking alongside global + * values, such as an input stream and a filesystem. + * + * It's got some inefficiencies -assuming system time is used for + * the tracking, System.currentTimeMillis will be invoked twice + * at each point of the process -and the results may actually be different. + * However, it enables multiple duration tracker factories to be given the + * opportunity to collect the statistics. 
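// Illustrative sketch, not part of this patch: pairing a stream-level store
// with its filesystem-level store so that a single trackDuration() call
// updates both. The key "op_read" is a hypothetical example.
import org.apache.hadoop.fs.statistics.DurationTracker;
import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.pairedTrackerFactory;

public class PairedTrackingSketch {
  public static DurationTracker startRead(
      IOStatisticsStore streamStatistics,
      IOStatisticsStore fsStatistics) {
    DurationTrackerFactory both = pairedTrackerFactory(streamStatistics, fsStatistics);
    // caller invokes failed() on error and close() when the operation finishes
    return both.trackDuration("op_read", 1);
  }
}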
+ */ +final class PairedDurationTrackerFactory implements DurationTrackerFactory { + + private final DurationTrackerFactory local; + private final DurationTrackerFactory global; + + PairedDurationTrackerFactory(final DurationTrackerFactory local, + final DurationTrackerFactory global) { + this.local = local; + this.global = global; + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return new PairedDurationTracker( + global.trackDuration(key, count), + local.trackDuration(key, count)); + } + + /** + * Tracker which wraps the two duration trackers created for the operation. + */ + private static final class PairedDurationTracker + implements DurationTracker { + private final DurationTracker firstDuration; + private final DurationTracker secondDuration; + + private PairedDurationTracker( + final DurationTracker firstDuration, + final DurationTracker secondDuration) { + this.firstDuration = firstDuration; + this.secondDuration = secondDuration; + } + + @Override + public void failed() { + firstDuration.failed(); + secondDuration.failed(); + } + + @Override + public void close() { + firstDuration.close(); + secondDuration.close(); + } + + /** + * @return the global duration + */ + @Override + public Duration asDuration() { + return firstDuration.asDuration(); + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/SourceWrappedStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/SourceWrappedStatistics.java new file mode 100644 index 0000000000000..5aced7c5cddbf --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/SourceWrappedStatistics.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +/** + * Wrap a statistics instance with an {@link IOStatisticsSource} + * instance which will then serve up the statistics when asked. + */ +public class SourceWrappedStatistics implements IOStatisticsSource { + + private final IOStatistics source; + + /** + * Constructor. + * @param source source of statistics. 
+ */ + public SourceWrappedStatistics(final IOStatistics source) { + this.source = source; + } + + @Override + public IOStatistics getIOStatistics() { + return source; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StatisticDurationTracker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StatisticDurationTracker.java new file mode 100644 index 0000000000000..ef9e7cb107a0d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StatisticDurationTracker.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; +import org.apache.hadoop.util.OperationDuration; + +/** + * Track the duration of an object. + * + * When closed the + * min/max/mean statistics are updated. + * + * In the constructor, the counter with name of 'key' is + * incremented -default is by 1, but can be set to other + * values, including 0. + */ +public class StatisticDurationTracker extends OperationDuration + implements DurationTracker { + + /** + * Statistics to update. + */ + private final IOStatisticsStore iostats; + + /** + * Key to use as prefix of values. + */ + private final String key; + + /** + * Flag to indicate the operation failed. + */ + private boolean failed; + + /** + * Constructor -increments the counter by 1. + * @param iostats statistics to update + * @param key prefix of values. + */ + public StatisticDurationTracker( + final IOStatisticsStore iostats, + final String key) { + this(iostats, key, 1); + } + + /** + * Constructor. + * If the supplied count is greater than zero, the counter + * of the key name is updated. + * @param iostats statistics to update + * @param key Key to use as prefix of values. + * @param count #of times to increment the matching counter. + */ + public StatisticDurationTracker( + final IOStatisticsStore iostats, + final String key, + final long count) { + this.iostats = iostats; + this.key = key; + if (count > 0) { + iostats.incrementCounter(key, count); + } + } + + @Override + public void failed() { + failed = true; + } + + /** + * Set the finished time and then update the statistics. + * If the operation failed then the key + .failures counter will be + * incremented by one. + * The operation min/mean/max values will be updated with the duration; + * on a failure these will all be the .failures metrics. 
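// Illustrative sketch, not part of this patch: driving a
// StatisticDurationTracker by hand around an operation. close() pushes the
// measured duration into the min/mean/max statistics of the (hypothetical)
// "op_mkdirs" key; failed() switches the update to the failure statistics.
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;
import org.apache.hadoop.fs.statistics.impl.StatisticDurationTracker;

public class ManualTrackerSketch {
  public static void runTracked(IOStatisticsStore store, Runnable operation) {
    StatisticDurationTracker tracker =
        new StatisticDurationTracker(store, "op_mkdirs");
    try {
      operation.run();
    } catch (RuntimeException e) {
      tracker.failed();
      throw e;
    } finally {
      tracker.close();
    }
  }
}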
+ */ + @Override + public void close() { + finished(); + String name = key; + if (failed) { + // failure: + name = key + StoreStatisticNames.SUFFIX_FAILURES; + iostats.incrementCounter(name); + } + iostats.addTimedOperation(name, asDuration()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StorageStatisticsFromIOStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StorageStatisticsFromIOStatistics.java new file mode 100644 index 0000000000000..f586cd8d9bdd4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StorageStatisticsFromIOStatistics.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.fs.statistics.IOStatistics; + +/** + * Returns all the counters of an IOStatistics instance as StorageStatistics. + * This is dynamic. + * The {@link #reset()} is downgraded to a no-op. + */ +public class StorageStatisticsFromIOStatistics + extends StorageStatistics + implements Iterable { + + private final IOStatistics ioStatistics; + private final String scheme; + + /** + * Instantiate. + * @param name storage statistics name. + * @param scheme FS scheme; may be null. + * @param ioStatistics IOStatistics source. + */ + public StorageStatisticsFromIOStatistics( + final String name, + final String scheme, + final IOStatistics ioStatistics) { + super(name); + this.scheme = scheme; + this.ioStatistics = ioStatistics; + } + + @Override + public Iterator iterator() { + return getLongStatistics(); + } + + /** + * Take a snapshot of the current counter values + * and return an iterator over them. + * @return all the counter statistics. + */ + @Override + public Iterator getLongStatistics() { + final Set> counters = counters() + .entrySet(); + final Set statisticSet = counters.stream().map( + this::toLongStatistic) + .collect(Collectors.toSet()); + + // add the gauges + gauges().entrySet().forEach(entry -> + statisticSet.add(toLongStatistic(entry))); + return statisticSet.iterator(); + } + + /** + * Convert a counter/gauge entry to a long statistics. 
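// Illustrative sketch, not part of this patch: republishing IOStatistics
// through the existing StorageStatistics API and printing the counters and
// gauges it serves up. The statistics name and scheme are hypothetical;
// getName()/getValue() are the accessors of StorageStatistics.LongStatistic.
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.statistics.IOStatistics;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.publishAsStorageStatistics;

public class StorageStatisticsSketch {
  public static void dump(IOStatistics source) {
    StorageStatistics stats =
        publishAsStorageStatistics("example-stats", "file", source);
    stats.getLongStatistics().forEachRemaining(s ->
        System.out.println(s.getName() + " = " + s.getValue()));
  }
}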
+ * @param e entry + * @return statistic + */ + private LongStatistic toLongStatistic(final Map.Entry e) { + return new LongStatistic(e.getKey(), e.getValue()); + } + + private Map counters() { + return ioStatistics.counters(); + } + + private Map gauges() { + return ioStatistics.gauges(); + } + + @Override + public Long getLong(final String key) { + Long l = counters().get(key); + if (l == null) { + l = gauges().get(key); + } + return l; + } + + @Override + public boolean isTracked(final String key) { + return counters().containsKey(key) + || gauges().containsKey(key); + } + + @Override + public void reset() { + /* no-op */ + } + + @Override + public String getScheme() { + return scheme; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StubDurationTracker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StubDurationTracker.java new file mode 100644 index 0000000000000..638a9da9c7b51 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StubDurationTracker.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.time.Duration; + +import org.apache.hadoop.fs.statistics.DurationTracker; + +/** + * A simple stub duration tracker which can be issued in interfaces + * and other places where full duration tracking is not implemented. + */ +public final class StubDurationTracker implements DurationTracker { + + public static final DurationTracker STUB_DURATION_TRACKER = + new StubDurationTracker(); + + private StubDurationTracker() { + } + + @Override + public void failed() { + + } + + @Override + public void close() { + + } + + @Override + public Duration asDuration() { + return Duration.ZERO; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StubDurationTrackerFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StubDurationTrackerFactory.java new file mode 100644 index 0000000000000..8856b6330cee6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/StubDurationTrackerFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; + +/** + * This is a stub factory which always returns no-op duration + * trackers. Allows for code to always be handed a factory. + */ +public final class StubDurationTrackerFactory + implements DurationTrackerFactory { + + /** + * Single instance. + */ + public static final StubDurationTrackerFactory STUB_DURATION_TRACKER_FACTORY + = new StubDurationTrackerFactory(); + + private StubDurationTrackerFactory() { + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return StubDurationTracker.STUB_DURATION_TRACKER; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java new file mode 100644 index 0000000000000..4e5fc6a6a1071 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics.impl; + +import java.util.Map; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.MeanStatistic; + +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; + +/** + * Wrap IOStatistics source with another (dynamic) wrapper. + */ +public class WrappedIOStatistics extends AbstractIOStatisticsImpl { + + /** + * The wrapped statistics. + */ + private IOStatistics wrapped; + + /** + * Instantiate. + * @param wrapped nullable wrapped statistics. + */ + public WrappedIOStatistics(final IOStatistics wrapped) { + this.wrapped = wrapped; + } + + /** + * Instantiate without setting the statistics. + * This is for subclasses which build up the map during their own + * construction. + */ + protected WrappedIOStatistics() { + } + + @Override + public Map counters() { + return getWrapped().counters(); + } + + /** + * Get at the wrapped inner statistics. 
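// Illustrative sketch, not part of this patch: using the stub factory as a
// default so callers are always handed a usable DurationTrackerFactory even
// when no statistics are being collected.
import javax.annotation.Nullable;

import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
import org.apache.hadoop.fs.statistics.impl.StubDurationTrackerFactory;

public class FactoryFallbackSketch {
  public static DurationTrackerFactory orStub(
      @Nullable DurationTrackerFactory factory) {
    return factory != null
        ? factory
        : StubDurationTrackerFactory.STUB_DURATION_TRACKER_FACTORY;
  }
}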
+ * @return the wrapped value + */ + protected IOStatistics getWrapped() { + return wrapped; + } + + /** + * Set the wrapped statistics. + * Will fail if the field is already set. + * @param wrapped new value + */ + protected void setWrapped(final IOStatistics wrapped) { + Preconditions.checkState(this.wrapped == null, + "Attempted to overwrite existing wrapped statistics"); + this.wrapped = wrapped; + } + + @Override + public Map gauges() { + return getWrapped().gauges(); + } + + @Override + public Map minimums() { + return getWrapped().minimums(); + } + + @Override + public Map maximums() { + return getWrapped().maximums(); + } + + @Override + public Map meanStatistics() { + return getWrapped().meanStatistics(); + } + + /** + * Return the statistics dump of the wrapped statistics. + * @return the statistics for logging. + */ + @Override + public String toString() { + return ioStatisticsToString(wrapped); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/package-info.java new file mode 100644 index 0000000000000..3ff7dacadce7a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/package-info.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Implementation support for statistics. + * For use internally; external filesystems MAY use this if the implementors + * accept that it is unstable and that incompatible changes may take + * place over minor point releases. + */ + +@InterfaceAudience.LimitedPrivate("Filesystems") +@InterfaceStability.Unstable +package org.apache.hadoop.fs.statistics.impl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/package-info.java new file mode 100644 index 0000000000000..bf46b33a516c6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/package-info.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains support for statistic collection and reporting. + * This is the public API; implementation classes are to be kept elsewhere. + *
<p> + * This package defines two interfaces: + * <p>
+ * {@link org.apache.hadoop.fs.statistics.IOStatisticsSource}: + * a source of statistic data, which can be retrieved + * through a call to + * {@link org.apache.hadoop.fs.statistics.IOStatisticsSource#getIOStatistics()}. + * <p>
+ * {@link org.apache.hadoop.fs.statistics.IOStatistics} the statistics retrieved + * from a statistics source. + * <p>
+ * The retrieved statistics may be an immutable snapshot, in which case to get + * updated statistics another call to + * {@link org.apache.hadoop.fs.statistics.IOStatisticsSource#getIOStatistics()} + * must be made. Or they may be dynamic, in which case every time a specific + * statistic is retrieved, the latest version is returned. Callers should assume + * that if a statistics instance is dynamic, there is no atomicity when querying + * multiple statistics. If the statistics source was a closeable object (e.g. a + * stream), the statistics MUST remain valid after the stream is closed. + * <p>
+ * Use pattern: + * <p>
+ * An application probes an object (filesystem, stream etc) to see if it + * implements {@code IOStatisticsSource}, and, if it is, + * calls {@code getIOStatistics()} to get its statistics. + * If this is non-null, the client has statistics on the current + * state of the statistics. + * A sketch of this probe pattern follows the implementors notes below. + * <p>
+ * The expectation is that a statistics source is dynamic: when a value is + * looked up the most recent values are returned. + * When iterating through the set, the values of the iterator SHOULD + * be frozen at the time the iterator was requested. + * <p>
+ * These statistics can be used to: log operations, profile applications, + * and make assertions about the state of the output. + * <p>
+ * The names of statistics are a matter of choice of the specific source. + * However, {@link org.apache.hadoop.fs.statistics.StoreStatisticNames} + * contains a + * set of names recommended for object store operations. + * {@link org.apache.hadoop.fs.statistics.StreamStatisticNames} declares + * recommended names for statistics provided for + * input and output streams. + * <p>
+ * Utility classes: + * <ul>
+ * <li> + * {@link org.apache.hadoop.fs.statistics.IOStatisticsSupport}. + * General support, including the ability to take a serializable + * snapshot of the current state of an IOStatistics instance. + * </li>
+ * <li> + * {@link org.apache.hadoop.fs.statistics.IOStatisticsLogging}. + * Methods for robust/on-demand string conversion, designed + * for use in logging statements and {@code toString()} implementations. + * </li>
+ * <li> + * {@link org.apache.hadoop.fs.statistics.IOStatisticsSnapshot}. + * A static snapshot of statistics which can be marshalled via + * java serialization or as JSON via jackson. It supports + * aggregation, so can be used to generate aggregate statistics. + * </li>
+ * </ul> + * + * <p>
+ * Implementors notes: + * <ol>
+ * <li> + * IOStatistics keys SHOULD be standard names where possible. + * </li>
+ * <li> + * An IOStatistics instance MUST be unique to that specific instance of + * {@link org.apache.hadoop.fs.statistics.IOStatisticsSource}. + * (i.e. not shared the way StorageStatistics are) + * </li>
+ * <li> + * MUST return the same values irrespective of which thread the statistics are + * retrieved or its keys evaluated. + * </li>
+ * <li> + * MUST NOT remove keys once a statistic instance has been created. + * </li>
+ * <li> + * MUST NOT add keys once a statistic instance has been created. + * </li>
+ * <li> + * MUST NOT block for long periods of time while blocking operations + * (reads, writes) are taking place in the source. + * That is: minimal synchronization points (AtomicLongs etc.) may be + * used to share values, but retrieval of statistics should + * be fast and return values even while slow/blocking remote IO is underway. + * </li>
+ * <li> + * MUST support value enumeration and retrieval after the source has been + * closed. + * </li>
+ * <li> + * SHOULD NOT have back-references to potentially expensive objects + * (filesystem instances etc.) + * </li>
+ * <li> + * SHOULD provide statistics which can be added to generate aggregate + * statistics. + * </li>
+ * </ol>
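+ * <p>
+ * A minimal sketch of the probe pattern described above (illustrative only; + * {@code in} and {@code LOG} are assumed to be supplied by the caller): + * <pre>
+ *   if (in instanceof IOStatisticsSource) {
+ *     IOStatistics stats = ((IOStatisticsSource) in).getIOStatistics();
+ *     if (stats != null) {
+ *       LOG.info("IO statistics: {}",
+ *           IOStatisticsLogging.ioStatisticsToString(stats));
+ *     }
+ *   }
+ * </pre>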
    + */ + +@InterfaceAudience.Public +@InterfaceStability.Unstable +package org.apache.hadoop.fs.statistics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java index 4c3dae9a9f99b..09ec5d2933058 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java @@ -66,8 +66,7 @@ public static void addLink(Configuration conf, final String mountTableName, */ public static void addLink(final Configuration conf, final String src, final URI target) { - addLink( conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - src, target); + addLink(conf, getDefaultMountTableName(conf), src, target); } /** @@ -88,8 +87,7 @@ public static void addLinkMergeSlash(Configuration conf, * @param target */ public static void addLinkMergeSlash(Configuration conf, final URI target) { - addLinkMergeSlash(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - target); + addLinkMergeSlash(conf, getDefaultMountTableName(conf), target); } /** @@ -110,8 +108,7 @@ public static void addLinkFallback(Configuration conf, * @param target */ public static void addLinkFallback(Configuration conf, final URI target) { - addLinkFallback(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, - target); + addLinkFallback(conf, getDefaultMountTableName(conf), target); } /** @@ -132,7 +129,18 @@ public static void addLinkMerge(Configuration conf, * @param targets */ public static void addLinkMerge(Configuration conf, final URI[] targets) { - addLinkMerge(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, targets); + addLinkMerge(conf, getDefaultMountTableName(conf), targets); + } + + /** + * Add nfly link to configuration for the given mount table. + */ + public static void addLinkNfly(Configuration conf, String mountTableName, + String src, String settings, final String targets) { + conf.set( + getConfigViewFsPrefix(mountTableName) + "." + + Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src, + targets); } /** @@ -149,16 +157,35 @@ public static void addLinkNfly(Configuration conf, String mountTableName, settings = settings == null ? "minReplication=2,repairOnRead=true" : settings; - - conf.set(getConfigViewFsPrefix(mountTableName) + "." + - Constants.CONFIG_VIEWFS_LINK_NFLY + "." + settings + "." + src, + addLinkNfly(conf, mountTableName, src, settings, StringUtils.uriToString(targets)); } public static void addLinkNfly(final Configuration conf, final String src, final URI ... targets) { - addLinkNfly(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, null, - targets); + addLinkNfly(conf, getDefaultMountTableName(conf), src, null, targets); + } + + /** + * Add a LinkRegex to the config for the specified mount table. 
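+ * <p>
+ * Illustrative sketch only; the mount table name, source regex and target + * below are hypothetical: + * <pre>
+ *   ConfigUtil.addLinkRegex(conf, "clusterX",
+ *       "^/user/(?&lt;username&gt;\\w+)", "/namespaces/$username", null);
+ * </pre>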
+ * @param conf - get mountable config from this conf + * @param mountTableName - the mountable name of the regex config item + * @param srcRegex - the src path regex expression that applies to this config + * @param targetStr - the string of target path + * @param interceptorSettings - the serialized interceptor string to be + * applied while resolving the mapping + */ + public static void addLinkRegex( + Configuration conf, final String mountTableName, final String srcRegex, + final String targetStr, final String interceptorSettings) { + String prefix = getConfigViewFsPrefix(mountTableName) + "." + + Constants.CONFIG_VIEWFS_LINK_REGEX + "."; + if ((interceptorSettings != null) && (!interceptorSettings.isEmpty())) { + prefix = prefix + interceptorSettings + + RegexMountPoint.SETTING_SRCREGEX_SEP; + } + String key = prefix + srcRegex; + conf.set(key, targetStr); } /** @@ -168,8 +195,7 @@ public static void addLinkNfly(final Configuration conf, final String src, */ public static void setHomeDirConf(final Configuration conf, final String homedir) { - setHomeDirConf( conf, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, homedir); + setHomeDirConf(conf, getDefaultMountTableName(conf), homedir); } /** @@ -193,7 +219,7 @@ public static void setHomeDirConf(final Configuration conf, * @return home dir value, null if variable is not in conf */ public static String getHomeDirValue(final Configuration conf) { - return getHomeDirValue(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); + return getHomeDirValue(conf, getDefaultMountTableName(conf)); } /** @@ -207,4 +233,18 @@ public static String getHomeDirValue(final Configuration conf, return conf.get(getConfigViewFsPrefix(mountTableName) + "." + Constants.CONFIG_VIEWFS_HOMEDIR); } + + /** + * Get the name of the default mount table to use. If + * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY} is specified, + * it's value is returned. Otherwise, + * {@link Constants#CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE} is returned. + * + * @param conf Configuration to use. + * @return the name of the default mount table to use. + */ + public static String getDefaultMountTableName(final Configuration conf) { + return conf.get(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index 37f1a16800e7d..bf9f7db7223d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -30,18 +30,29 @@ public interface Constants { * Prefix for the config variable prefix for the ViewFs mount-table */ public static final String CONFIG_VIEWFS_PREFIX = "fs.viewfs.mounttable"; + + /** + * Prefix for the config variable for the ViewFs mount-table path. + */ + String CONFIG_VIEWFS_MOUNTTABLE_PATH = CONFIG_VIEWFS_PREFIX + ".path"; /** * Prefix for the home dir for the mount table - if not specified * then the hadoop default value (/user) is used. */ public static final String CONFIG_VIEWFS_HOMEDIR = "homedir"; - + + /** + * Config key to specify the name of the default mount table. + */ + String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY = + "fs.viewfs.mounttable.default.name.key"; + /** * Config variable name for the default mount table. 
*/ public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default"; - + /** * Config variable full prefix for the default mount table. */ @@ -75,6 +86,14 @@ public interface Constants { */ String CONFIG_VIEWFS_LINK_MERGE_SLASH = "linkMergeSlash"; + /** + * Config variable for specifying a regex link which uses regular expressions + * as source and target could use group captured in src. + * E.g. (^/(?\\w+), /prefix-${firstDir}) => + * (/path1/file1 => /prefix-path1/file1) + */ + String CONFIG_VIEWFS_LINK_REGEX = "linkRegex"; + FsPermission PERMISSION_555 = new FsPermission((short) 0555); String CONFIG_VIEWFS_RENAME_STRATEGY = "fs.viewfs.rename.strategy"; @@ -85,4 +104,25 @@ public interface Constants { String CONFIG_VIEWFS_ENABLE_INNER_CACHE = "fs.viewfs.enable.inner.cache"; boolean CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT = true; + + /** + * Enable ViewFileSystem to show mountlinks as symlinks. + */ + String CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS = + "fs.viewfs.mount.links.as.symlinks"; + + boolean CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT = true; + + /** + * When initializing the viewfs, authority will be used as the mount table + * name to find the mount link configurations. To make the mount table name + * unique, we may want to ignore port if initialized uri authority contains + * port number. By default, we will consider port number also in + * ViewFileSystem(This default value false, because to support existing + * deployments continue with the current behavior). + */ + String CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME = + "fs.viewfs.ignore.port.in.mount.table.name"; + + boolean CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT = false; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java similarity index 57% rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java index e9959c192df83..071af11e63bf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSMultipartUploaderFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/FsGetter.java @@ -15,26 +15,33 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs; +package org.apache.hadoop.fs.viewfs; +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileSystemMultipartUploader; -import org.apache.hadoop.fs.MultipartUploader; -import org.apache.hadoop.fs.MultipartUploaderFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; /** - * Support for HDFS multipart uploads, built on - * {@link FileSystem#concat(Path, Path[])}. + * File system instance getter. 
*/ -public class DFSMultipartUploaderFactory extends MultipartUploaderFactory { - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (fs.getScheme().equals(HdfsConstants.HDFS_URI_SCHEME)) { - return new FileSystemMultipartUploader(fs); - } - return null; +@Private +class FsGetter { + + /** + * Gets new file system instance of given uri. + */ + public FileSystem getNewInstance(URI uri, Configuration conf) + throws IOException { + return FileSystem.newInstance(uri, conf); + } + + /** + * Gets file system instance of given uri. + */ + public FileSystem get(URI uri, Configuration conf) throws IOException { + return FileSystem.get(uri, conf); } -} +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java new file mode 100644 index 0000000000000..3968e3650cf39 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An implementation for Apache Hadoop compatible file system based mount-table + * file loading. + */ +public class HCFSMountTableConfigLoader implements MountTableConfigLoader { + private static final String REGEX_DOT = "[.]"; + private static final Logger LOGGER = + LoggerFactory.getLogger(HCFSMountTableConfigLoader.class); + private Path mountTable = null; + + /** + * Loads the mount-table configuration from hadoop compatible file system and + * add the configuration items to given configuration. Mount-table + * configuration format should be suffixed with version number. + * Format: mount-table..xml + * Example: mount-table.1.xml + * When user wants to update mount-table, the expectation is to upload new + * mount-table configuration file with monotonically increasing integer as + * version number. This API loads the highest version number file. We can + * also configure single file path directly. + * + * @param mountTableConfigPath : A directory path where mount-table files + * stored or a mount-table file path. We recommend to configure + * directory with the mount-table version files. + * @param conf : to add the mount table as resource. 
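+ *
+ * <p> Illustrative usage sketch; the directory path below is hypothetical: + * <pre>
+ *   MountTableConfigLoader loader = new HCFSMountTableConfigLoader();
+ *   loader.load("hdfs://ns1/config/mount-table-dir", conf);
+ * </pre>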
+ */ + @Override + public void load(String mountTableConfigPath, Configuration conf) + throws IOException { + this.mountTable = new Path(mountTableConfigPath); + String scheme = mountTable.toUri().getScheme(); + FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme); + try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) { + RemoteIterator listFiles = + fs.listFiles(mountTable, false); + LocatedFileStatus lfs = null; + int higherVersion = -1; + while (listFiles.hasNext()) { + LocatedFileStatus curLfs = listFiles.next(); + String cur = curLfs.getPath().getName(); + String[] nameParts = cur.split(REGEX_DOT); + if (nameParts.length < 2) { + logInvalidFileNameFormat(cur); + continue; // invalid file name + } + int curVersion = higherVersion; + try { + curVersion = Integer.parseInt(nameParts[nameParts.length - 2]); + } catch (NumberFormatException nfe) { + logInvalidFileNameFormat(cur); + continue; + } + + if (curVersion > higherVersion) { + higherVersion = curVersion; + lfs = curLfs; + } + } + + if (lfs == null) { + // No valid mount table file found. + // TODO: Should we fail? Currently viewfs init will fail if no mount + // links anyway. + LOGGER.warn("No valid mount-table file exist at: {}. At least one " + + "mount-table file should present with the name format: " + + "mount-table..xml", mountTableConfigPath); + return; + } + // Latest version file. + Path latestVersionMountTable = lfs.getPath(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Loading the mount-table {} into configuration.", + latestVersionMountTable); + } + try (FSDataInputStream open = fs.open(latestVersionMountTable)) { + Configuration newConf = new Configuration(false); + newConf.addResource(open); + // This will add configuration props as resource, instead of stream + // itself. So, that stream can be closed now. + conf.addResource(newConf); + } + } + } + + private void logInvalidFileNameFormat(String cur) { + LOGGER.warn("Invalid file name format for mount-table version file: {}. " + + "The valid file name format is mount-table-name..xml", + cur); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 69923438ecc20..fd7b5619b274a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs.viewfs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; @@ -34,10 +34,13 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * InodeTree implements a mount-table as a tree of inodes. @@ -45,19 +48,22 @@ * In order to use it the caller must subclass it and implement * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc. 
* - * The mountable is initialized from the config variables as + * The mountable is initialized from the config variables as * specified in {@link ViewFs} * * @param is AbstractFileSystem or FileSystem * * The two main methods are * {@link #InodeTree(Configuration, String)} // constructor - * {@link #resolve(String, boolean)} + * {@link #resolve(String, boolean)} */ @InterfaceAudience.Private @InterfaceStability.Unstable abstract class InodeTree { + private static final Logger LOGGER = + LoggerFactory.getLogger(InodeTree.class.getName()); + enum ResultKind { INTERNAL_DIR, EXTERNAL_DIR @@ -67,10 +73,12 @@ enum ResultKind { // the root of the mount table private final INode root; // the fallback filesystem - private final INodeLink rootFallbackLink; + private INodeLink rootFallbackLink; // the homedir for this mount table private final String homedirPrefix; private List> mountPoints = new ArrayList>(); + private List> regexMountPointList = + new ArrayList>(); static class MountPoint { String src; @@ -121,8 +129,9 @@ boolean isLink() { */ static class INodeDir extends INode { private final Map> children = new HashMap<>(); - private T internalDirFs = null; //filesystem of this internal directory + private T internalDirFs = null; //filesystem of this internal directory private boolean isRoot = false; + private INodeLink fallbackLink = null; INodeDir(final String pathToNode, final UserGroupInformation aUgi) { super(pathToNode, aUgi); @@ -149,6 +158,17 @@ boolean isRoot() { return isRoot; } + INodeLink getFallbackLink() { + return fallbackLink; + } + + void addFallbackLink(INodeLink link) throws IOException { + if (!isRoot) { + throw new IOException("Fallback link can only be added for root"); + } + this.fallbackLink = link; + } + Map> getChildren() { return Collections.unmodifiableMap(children); } @@ -213,7 +233,14 @@ enum LinkType { * Config prefix: fs.viewfs.mounttable..linkNfly * Refer: {@link Constants#CONFIG_VIEWFS_LINK_NFLY} */ - NFLY; + NFLY, + /** + * Link entry which source are regex exrepssions and target refer matched + * group from source + * Config prefix: fs.viewfs.mounttable..linkRegex + * Refer: {@link Constants#CONFIG_VIEWFS_LINK_REGEX} + */ + REGEX; } /** @@ -362,7 +389,7 @@ protected abstract T getTargetFileSystem(URI uri) throws UnsupportedFileSystemException, URISyntaxException, IOException; protected abstract T getTargetFileSystem(INodeDir dir) - throws URISyntaxException; + throws URISyntaxException, IOException; protected abstract T getTargetFileSystem(String settings, URI[] mergeFsURIs) throws UnsupportedFileSystemException, URISyntaxException, IOException; @@ -381,7 +408,16 @@ private boolean hasFallbackLink() { return rootFallbackLink != null; } - private INodeLink getRootFallbackLink() { + /** + * @return true if the root represented as internalDir. In LinkMergeSlash, + * there will be root to root mapping. So, root does not represent as + * internalDir. 
+ */ + protected boolean isRootInternalDir() { + return root.isInternalDir(); + } + + protected INodeLink getRootFallbackLink() { Preconditions.checkState(root.isInternalDir()); return rootFallbackLink; } @@ -440,7 +476,7 @@ Configuration getConfig() { /** * Create Inode Tree from the specified mount-table specified in Config - * @param config - the mount table keys are prefixed with + * @param config - the mount table keys are prefixed with * FsConstants.CONFIG_VIEWFS_PREFIX * @param viewName - the name of the mount table - if null use defaultMT name * @throws UnsupportedFileSystemException @@ -448,12 +484,13 @@ Configuration getConfig() { * @throws FileAlreadyExistsException * @throws IOException */ - protected InodeTree(final Configuration config, final String viewName) + protected InodeTree(final Configuration config, final String viewName, + final URI theUri, boolean initingUriAsFallbackOnNoMounts) throws UnsupportedFileSystemException, URISyntaxException, FileAlreadyExistsException, IOException { String mountTableName = viewName; if (mountTableName == null) { - mountTableName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; + mountTableName = ConfigUtil.getDefaultMountTableName(config); } homedirPrefix = ConfigUtil.getHomeDirValue(config, mountTableName); @@ -472,84 +509,82 @@ protected InodeTree(final Configuration config, final String viewName) final UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); for (Entry si : config) { final String key = si.getKey(); - if (key.startsWith(mountTablePrefix)) { - gotMountTableEntry = true; - LinkType linkType; - String src = key.substring(mountTablePrefix.length()); - String settings = null; - if (src.startsWith(linkPrefix)) { - src = src.substring(linkPrefix.length()); - if (src.equals(SlashPath.toString())) { - throw new UnsupportedFileSystemException("Unexpected mount table " - + "link entry '" + key + "'. Use " - + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH + " instead!"); - } - linkType = LinkType.SINGLE; - } else if (src.startsWith(linkFallbackPrefix)) { - if (src.length() != linkFallbackPrefix.length()) { - throw new IOException("ViewFs: Mount points initialization error." + - " Invalid " + Constants.CONFIG_VIEWFS_LINK_FALLBACK + - " entry in config: " + src); - } - linkType = LinkType.SINGLE_FALLBACK; - } else if (src.startsWith(linkMergePrefix)) { // A merge link - src = src.substring(linkMergePrefix.length()); - linkType = LinkType.MERGE; - } else if (src.startsWith(linkMergeSlashPrefix)) { - // This is a LinkMergeSlash entry. This entry should - // not have any additional source path. - if (src.length() != linkMergeSlashPrefix.length()) { - throw new IOException("ViewFs: Mount points initialization error." 
+ - " Invalid " + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH + - " entry in config: " + src); - } - linkType = LinkType.MERGE_SLASH; - } else if (src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY)) { - // prefix.settings.src - src = src.substring(Constants.CONFIG_VIEWFS_LINK_NFLY.length() + 1); - // settings.src - settings = src.substring(0, src.indexOf('.')); - // settings - - // settings.src - src = src.substring(settings.length() + 1); - // src - - linkType = LinkType.NFLY; - } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) { - // ignore - we set home dir from config - continue; - } else { - throw new IOException("ViewFs: Cannot initialize: Invalid entry in " + - "Mount table in config: " + src); + if (!key.startsWith(mountTablePrefix)) { + continue; + } + + gotMountTableEntry = true; + LinkType linkType; + String src = key.substring(mountTablePrefix.length()); + String settings = null; + if (src.startsWith(linkPrefix)) { + src = src.substring(linkPrefix.length()); + if (src.equals(SlashPath.toString())) { + throw new UnsupportedFileSystemException("Unexpected mount table " + + "link entry '" + key + "'. Use " + + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH + " instead!"); } + linkType = LinkType.SINGLE; + } else if (src.startsWith(linkFallbackPrefix)) { + checkMntEntryKeyEqualsTarget(src, linkFallbackPrefix); + linkType = LinkType.SINGLE_FALLBACK; + } else if (src.startsWith(linkMergePrefix)) { // A merge link + src = src.substring(linkMergePrefix.length()); + linkType = LinkType.MERGE; + } else if (src.startsWith(linkMergeSlashPrefix)) { + // This is a LinkMergeSlash entry. This entry should + // not have any additional source path. + checkMntEntryKeyEqualsTarget(src, linkMergeSlashPrefix); + linkType = LinkType.MERGE_SLASH; + } else if (src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY)) { + // prefix.settings.src + src = src.substring(Constants.CONFIG_VIEWFS_LINK_NFLY.length() + 1); + // settings.src + settings = src.substring(0, src.indexOf('.')); + // settings + + // settings.src + src = src.substring(settings.length() + 1); + // src + + linkType = LinkType.NFLY; + } else if (src.startsWith(Constants.CONFIG_VIEWFS_LINK_REGEX)) { + linkEntries.add( + buildLinkRegexEntry(config, ugi, src, si.getValue())); + continue; + } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) { + // ignore - we set home dir from config + continue; + } else { + throw new IOException("ViewFs: Cannot initialize: Invalid entry in " + + "Mount table in config: " + src); + } - final String target = si.getValue(); - if (linkType != LinkType.MERGE_SLASH) { - if (isMergeSlashConfigured) { - throw new IOException("Mount table " + mountTableName - + " has already been configured with a merge slash link. " - + "A regular link should not be added."); - } - linkEntries.add( - new LinkEntry(src, target, linkType, settings, ugi, config)); - } else { - if (!linkEntries.isEmpty()) { - throw new IOException("Mount table " + mountTableName - + " has already been configured with regular links. " - + "A merge slash link should not be configured."); - } - if (isMergeSlashConfigured) { - throw new IOException("Mount table " + mountTableName - + " has already been configured with a merge slash link. 
" - + "Multiple merge slash links for the same mount table is " - + "not allowed."); - } - isMergeSlashConfigured = true; - mergeSlashTarget = target; + final String target = si.getValue(); + if (linkType != LinkType.MERGE_SLASH) { + if (isMergeSlashConfigured) { + throw new IOException("Mount table " + mountTableName + + " has already been configured with a merge slash link. " + + "A regular link should not be added."); + } + linkEntries.add( + new LinkEntry(src, target, linkType, settings, ugi, config)); + } else { + if (!linkEntries.isEmpty()) { + throw new IOException("Mount table " + mountTableName + + " has already been configured with regular links. " + + "A merge slash link should not be configured."); + } + if (isMergeSlashConfigured) { + throw new IOException("Mount table " + mountTableName + + " has already been configured with a merge slash link. " + + "Multiple merge slash links for the same mount table is " + + "not allowed."); } + isMergeSlashConfigured = true; + mergeSlashTarget = target; } - } + } // End of for loop. if (isMergeSlashConfigured) { Preconditions.checkNotNull(mergeSlashTarget); @@ -564,7 +599,8 @@ protected InodeTree(final Configuration config, final String viewName) getRootDir().setRoot(true); INodeLink fallbackLink = null; for (LinkEntry le : linkEntries) { - if (le.isLinkType(LinkType.SINGLE_FALLBACK)) { + switch (le.getLinkType()) { + case SINGLE_FALLBACK: if (fallbackLink != null) { throw new IOException("Mount table " + mountTableName + " has already been configured with a link fallback. " @@ -574,21 +610,86 @@ protected InodeTree(final Configuration config, final String viewName) fallbackLink = new INodeLink(mountTableName, ugi, getTargetFileSystem(new URI(le.getTarget())), new URI(le.getTarget())); - } else { + continue; + case REGEX: + addRegexMountEntry(le); + continue; + default: createLink(le.getSrc(), le.getTarget(), le.getLinkType(), le.getSettings(), le.getUgi(), le.getConfig()); } } rootFallbackLink = fallbackLink; + getRootDir().addFallbackLink(rootFallbackLink); } if (!gotMountTableEntry) { - throw new IOException( - "ViewFs: Cannot initialize: Empty Mount table in config for " + - "viewfs://" + mountTableName + "/"); + if (!initingUriAsFallbackOnNoMounts) { + throw new IOException(new StringBuilder( + "ViewFs: Cannot initialize: Empty Mount table in config for ") + .append(theUri.getScheme()).append("://").append(mountTableName) + .append("/").toString()); + } + StringBuilder msg = + new StringBuilder("Empty mount table detected for ").append(theUri) + .append(" and considering itself as a linkFallback."); + FileSystem.LOG.info(msg.toString()); + rootFallbackLink = + new INodeLink(mountTableName, ugi, getTargetFileSystem(theUri), + theUri); + getRootDir().addFallbackLink(rootFallbackLink); + } + } + + private void checkMntEntryKeyEqualsTarget( + String mntEntryKey, String targetMntEntryKey) throws IOException { + if (!mntEntryKey.equals(targetMntEntryKey)) { + throw new IOException("ViewFs: Mount points initialization error." 
+ + " Invalid " + targetMntEntryKey + + " entry in config: " + mntEntryKey); } } + private void addRegexMountEntry(LinkEntry le) throws IOException { + LOGGER.info("Add regex mount point:" + le.getSrc() + + ", target:" + le.getTarget() + + ", interceptor settings:" + le.getSettings()); + RegexMountPoint regexMountPoint = + new RegexMountPoint( + this, le.getSrc(), le.getTarget(), le.getSettings()); + regexMountPoint.initialize(); + regexMountPointList.add(regexMountPoint); + } + + private LinkEntry buildLinkRegexEntry( + Configuration config, UserGroupInformation ugi, + String mntEntryStrippedKey, String mntEntryValue) { + String linkKeyPath = null; + String settings = null; + final String linkRegexPrefix = Constants.CONFIG_VIEWFS_LINK_REGEX + "."; + // settings#.linkKey + String settingsAndLinkKeyPath = + mntEntryStrippedKey.substring(linkRegexPrefix.length()); + int settingLinkKeySepIndex = settingsAndLinkKeyPath + .indexOf(RegexMountPoint.SETTING_SRCREGEX_SEP); + if (settingLinkKeySepIndex == -1) { + // There's no settings + linkKeyPath = settingsAndLinkKeyPath; + settings = null; + } else { + // settings#.linkKey style configuration + // settings from settings#.linkKey + settings = + settingsAndLinkKeyPath.substring(0, settingLinkKeySepIndex); + // linkKeyPath + linkKeyPath = settingsAndLinkKeyPath.substring( + settings.length() + RegexMountPoint.SETTING_SRCREGEX_SEP + .length()); + } + return new LinkEntry( + linkKeyPath, mntEntryValue, LinkType.REGEX, settings, ugi, config); + } + /** * Resolve returns ResolveResult. * The caller can continue the resolution of the remainingPath @@ -605,23 +706,31 @@ static class ResolveResult { final T targetFileSystem; final String resolvedPath; final Path remainingPath; // to resolve in the target FileSystem + private final boolean isLastInternalDirLink; ResolveResult(final ResultKind k, final T targetFs, final String resolveP, - final Path remainingP) { + final Path remainingP, boolean isLastIntenalDirLink) { kind = k; targetFileSystem = targetFs; resolvedPath = resolveP; remainingPath = remainingP; + this.isLastInternalDirLink = isLastIntenalDirLink; } // Internal dir path resolution completed within the mount table boolean isInternalDir() { return (kind == ResultKind.INTERNAL_DIR); } + + // Indicates whether the internal dir path resolution completed at the link + // or resolved due to fallback. + boolean isLastInternalDirLink() { + return this.isLastInternalDirLink; + } } /** - * Resolve the pathname p relative to root InodeDir + * Resolve the pathname p relative to root InodeDir. * @param p - input path * @param resolveLastComponent * @return ResolveResult which allows further resolution of the remaining path @@ -629,13 +738,15 @@ boolean isInternalDir() { */ ResolveResult resolve(final String p, final boolean resolveLastComponent) throws FileNotFoundException { + ResolveResult resolveResult = null; String[] path = breakIntoPathComponents(p); if (path.length <= 1) { // special case for when path is "/" T targetFs = root.isInternalDir() ? 
- getRootDir().getInternalDirFs() : getRootLink().getTargetFileSystem(); - ResolveResult res = new ResolveResult(ResultKind.INTERNAL_DIR, - targetFs, root.fullPath, SlashPath); - return res; + getRootDir().getInternalDirFs() + : getRootLink().getTargetFileSystem(); + resolveResult = new ResolveResult(ResultKind.INTERNAL_DIR, + targetFs, root.fullPath, SlashPath, false); + return resolveResult; } /** @@ -651,22 +762,30 @@ ResolveResult resolve(final String p, final boolean resolveLastComponent) remainingPathStr.append("/").append(path[i]); } remainingPath = new Path(remainingPathStr.toString()); - ResolveResult res = new ResolveResult(ResultKind.EXTERNAL_DIR, - getRootLink().getTargetFileSystem(), root.fullPath, remainingPath); - return res; + resolveResult = new ResolveResult(ResultKind.EXTERNAL_DIR, + getRootLink().getTargetFileSystem(), root.fullPath, remainingPath, + true); + return resolveResult; } Preconditions.checkState(root.isInternalDir()); INodeDir curInode = getRootDir(); + // Try to resolve path in the regex mount point + resolveResult = tryResolveInRegexMountpoint(p, resolveLastComponent); + if (resolveResult != null) { + return resolveResult; + } + int i; // ignore first slash for (i = 1; i < path.length - (resolveLastComponent ? 0 : 1); i++) { INode nextInode = curInode.resolveInternal(path[i]); if (nextInode == null) { if (hasFallbackLink()) { - return new ResolveResult(ResultKind.EXTERNAL_DIR, - getRootFallbackLink().getTargetFileSystem(), - root.fullPath, new Path(p)); + resolveResult = new ResolveResult(ResultKind.EXTERNAL_DIR, + getRootFallbackLink().getTargetFileSystem(), root.fullPath, + new Path(p), false); + return resolveResult; } else { StringBuilder failedAt = new StringBuilder(path[0]); for (int j = 1; j <= i; ++j) { @@ -683,16 +802,17 @@ ResolveResult resolve(final String p, final boolean resolveLastComponent) if (i >= path.length - 1) { remainingPath = SlashPath; } else { - StringBuilder remainingPathStr = new StringBuilder("/" + path[i + 1]); + StringBuilder remainingPathStr = + new StringBuilder("/" + path[i + 1]); for (int j = i + 2; j < path.length; ++j) { remainingPathStr.append('/').append(path[j]); } remainingPath = new Path(remainingPathStr.toString()); } - final ResolveResult res = - new ResolveResult(ResultKind.EXTERNAL_DIR, - link.getTargetFileSystem(), nextInode.fullPath, remainingPath); - return res; + resolveResult = new ResolveResult(ResultKind.EXTERNAL_DIR, + link.getTargetFileSystem(), nextInode.fullPath, remainingPath, + true); + return resolveResult; } else if (nextInode.isInternalDir()) { curInode = (INodeDir) nextInode; } @@ -713,10 +833,77 @@ ResolveResult resolve(final String p, final boolean resolveLastComponent) } remainingPath = new Path(remainingPathStr.toString()); } - final ResolveResult res = - new ResolveResult(ResultKind.INTERNAL_DIR, - curInode.getInternalDirFs(), curInode.fullPath, remainingPath); - return res; + resolveResult = new ResolveResult(ResultKind.INTERNAL_DIR, + curInode.getInternalDirFs(), curInode.fullPath, remainingPath, false); + return resolveResult; + } + + /** + * Walk through all regex mount points to see + * whether the path match any regex expressions. + * E.g. 
link: ^/user/(?\\w+) => s3://$user.apache.com/_${user} + * srcPath: is /user/hadoop/dir1 + * resolveLastComponent: true + * then return value is s3://hadoop.apache.com/_hadoop + * + * @param srcPath + * @param resolveLastComponent + * @return + */ + protected ResolveResult tryResolveInRegexMountpoint(final String srcPath, + final boolean resolveLastComponent) { + for (RegexMountPoint regexMountPoint : regexMountPointList) { + ResolveResult resolveResult = + regexMountPoint.resolve(srcPath, resolveLastComponent); + if (resolveResult != null) { + return resolveResult; + } + } + return null; + } + + /** + * Build resolve result. + * Here's an example + * Mountpoint: fs.viewfs.mounttable.mt + * .linkRegex.replaceresolveddstpath:_:-#.^/user/(?\w+) + * Value: /targetTestRoot/$username + * Dir path to test: + * viewfs://mt/user/hadoop_user1/hadoop_dir1 + * Expect path: /targetTestRoot/hadoop-user1/hadoop_dir1 + * resolvedPathStr: /user/hadoop_user1 + * targetOfResolvedPathStr: /targetTestRoot/hadoop-user1 + * remainingPath: /hadoop_dir1 + * + * @return targetFileSystem or null on exceptions. + */ + protected ResolveResult buildResolveResultForRegexMountPoint( + ResultKind resultKind, String resolvedPathStr, + String targetOfResolvedPathStr, Path remainingPath) { + try { + T targetFs = getTargetFileSystem( + new URI(targetOfResolvedPathStr)); + return new ResolveResult(resultKind, targetFs, resolvedPathStr, + remainingPath, true); + } catch (IOException ex) { + LOGGER.error(String.format( + "Got Exception while build resolve result." + + " ResultKind:%s, resolvedPathStr:%s," + + " targetOfResolvedPathStr:%s, remainingPath:%s," + + " will return null.", + resultKind, resolvedPathStr, targetOfResolvedPathStr, remainingPath), + ex); + return null; + } catch (URISyntaxException uex) { + LOGGER.error(String.format( + "Got Exception while build resolve result." + + " ResultKind:%s, resolvedPathStr:%s," + + " targetOfResolvedPathStr:%s, remainingPath:%s," + + " will return null.", + resultKind, resolvedPathStr, targetOfResolvedPathStr, remainingPath), + uex); + return null; + } } List> getMountPoints() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java new file mode 100644 index 0000000000000..bc2c3ea93c58c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/MountTableConfigLoader.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; + +/** + * An interface for loading mount-table configuration. This class can have more + * APIs like refreshing mount tables automatically etc. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface MountTableConfigLoader { + + /** + * Loads the mount-table configuration into given configuration. + * + * @param mountTableConfigPath - Path of the mount table. It can be a file or + * a directory in the case of multiple versions of mount-table + * files(Recommended option). + * @param conf - Configuration object to add mount table. + */ + void load(String mountTableConfigPath, Configuration conf) + throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java index a406d77f2ef6c..85af68af31434 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NflyFSystem.java @@ -212,6 +212,21 @@ private static String getRack(String rackString) { */ private NflyFSystem(URI[] uris, Configuration conf, int minReplication, EnumSet nflyFlags) throws IOException { + this(uris, conf, minReplication, nflyFlags, null); + } + + /** + * Creates a new Nfly instance. + * + * @param uris the list of uris in the mount point + * @param conf configuration object + * @param minReplication minimum copies to commit a write op + * @param nflyFlags modes such readMostRecent + * @param fsGetter to get the file system instance with the given uri + * @throws IOException + */ + private NflyFSystem(URI[] uris, Configuration conf, int minReplication, + EnumSet nflyFlags, FsGetter fsGetter) throws IOException { if (uris.length < minReplication) { throw new IOException(minReplication + " < " + uris.length + ": Minimum replication < #destinations"); @@ -238,8 +253,14 @@ private NflyFSystem(URI[] uris, Configuration conf, int minReplication, nodes = new NflyNode[uris.length]; final Iterator rackIter = rackStrings.iterator(); for (int i = 0; i < nodes.length; i++) { - nodes[i] = new NflyNode(hostStrings.get(i), rackIter.next(), uris[i], - conf); + if (fsGetter != null) { + nodes[i] = new NflyNode(hostStrings.get(i), rackIter.next(), + new ChRootedFileSystem(fsGetter.getNewInstance(uris[i], conf), + uris[i])); + } else { + nodes[i] = + new NflyNode(hostStrings.get(i), rackIter.next(), uris[i], conf); + } } // sort all the uri's by distance from myNode, the local file system will // automatically be the the first one. 
@@ -921,7 +942,7 @@ private static void processThrowable(NflyNode nflyNode, String op, * @throws IOException */ static FileSystem createFileSystem(URI[] uris, Configuration conf, - String settings) throws IOException { + String settings, FsGetter fsGetter) throws IOException { // assert settings != null int minRepl = DEFAULT_MIN_REPLICATION; EnumSet nflyFlags = EnumSet.noneOf(NflyKey.class); @@ -946,6 +967,6 @@ static FileSystem createFileSystem(URI[] uris, Configuration conf, throw new IllegalArgumentException(nflyKey + ": Infeasible"); } } - return new NflyFSystem(uris, conf, minRepl, nflyFlags); + return new NflyFSystem(uris, conf, minRepl, nflyFlags, fsGetter); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPoint.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPoint.java new file mode 100644 index 0000000000000..aace7a2dba57d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPoint.java @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.fs.viewfs.InodeTree.SlashPath; + +/** + * Regex mount point is build to implement regex based mount point. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +class RegexMountPoint { + private static final Logger LOGGER = + LoggerFactory.getLogger(RegexMountPoint.class.getName()); + + private InodeTree inodeTree; + private String srcPathRegex; + private Pattern srcPattern; + private String dstPath; + private String interceptorSettingsString; + private List interceptorList; + + public static final String SETTING_SRCREGEX_SEP = "#."; + public static final char INTERCEPTOR_SEP = ';'; + public static final char INTERCEPTOR_INTERNAL_SEP = ':'; + // ${var},$var + public static final Pattern VAR_PATTERN_IN_DEST = + Pattern.compile("\\$((\\{\\w+\\})|(\\w+))"); + + // Same var might have different representations. + // e.g. + // key => $key or key = > ${key} + private Map> varInDestPathMap; + + public Map> getVarInDestPathMap() { + return varInDestPathMap; + } + + RegexMountPoint(InodeTree inodeTree, String sourcePathRegex, + String destPath, String settingsStr) { + this.inodeTree = inodeTree; + this.srcPathRegex = sourcePathRegex; + this.dstPath = destPath; + this.interceptorSettingsString = settingsStr; + this.interceptorList = new ArrayList<>(); + } + + /** + * Initialize regex mount point. + * + * @throws IOException + */ + public void initialize() throws IOException { + try { + srcPattern = Pattern.compile(srcPathRegex); + } catch (PatternSyntaxException ex) { + throw new IOException( + "Failed to initialized mount point due to bad src path regex:" + + srcPathRegex + ", dstPath:" + dstPath, ex); + } + varInDestPathMap = getVarListInString(dstPath); + initializeInterceptors(); + } + + private void initializeInterceptors() throws IOException { + if (interceptorSettingsString == null + || interceptorSettingsString.isEmpty()) { + return; + } + String[] interceptorStrArray = + StringUtils.split(interceptorSettingsString, INTERCEPTOR_SEP); + for (String interceptorStr : interceptorStrArray) { + RegexMountPointInterceptor interceptor = + RegexMountPointInterceptorFactory.create(interceptorStr); + if (interceptor == null) { + throw new IOException( + "Illegal settings String " + interceptorSettingsString); + } + interceptor.initialize(); + interceptorList.add(interceptor); + } + } + + /** + * Get $var1 and $var2 style variables in string. + * + * @param input - the string to be process. 
+ * @return + */ + public static Map> getVarListInString(String input) { + Map> varMap = new HashMap<>(); + Matcher matcher = VAR_PATTERN_IN_DEST.matcher(input); + while (matcher.find()) { + // $var or ${var} + String varName = matcher.group(0); + // var or {var} + String strippedVarName = matcher.group(1); + if (strippedVarName.startsWith("{")) { + // {varName} = > varName + strippedVarName = + strippedVarName.substring(1, strippedVarName.length() - 1); + } + varMap.putIfAbsent(strippedVarName, new HashSet<>()); + varMap.get(strippedVarName).add(varName); + } + return varMap; + } + + public String getSrcPathRegex() { + return srcPathRegex; + } + + public Pattern getSrcPattern() { + return srcPattern; + } + + public String getDstPath() { + return dstPath; + } + + public static Pattern getVarPatternInDest() { + return VAR_PATTERN_IN_DEST; + } + + /** + * Get resolved path from regex mount points. + * E.g. link: ^/user/(?\\w+) => s3://$user.apache.com/_${user} + * srcPath: is /user/hadoop/dir1 + * resolveLastComponent: true + * then return value is s3://hadoop.apache.com/_hadoop + * @param srcPath - the src path to resolve + * @param resolveLastComponent - whether resolve the path after last `/` + * @return mapped path of the mount point. + */ + public InodeTree.ResolveResult resolve(final String srcPath, + final boolean resolveLastComponent) { + String pathStrToResolve = getPathToResolve(srcPath, resolveLastComponent); + for (RegexMountPointInterceptor interceptor : interceptorList) { + pathStrToResolve = interceptor.interceptSource(pathStrToResolve); + } + LOGGER.debug("Path to resolve:" + pathStrToResolve + ", srcPattern:" + + getSrcPathRegex()); + Matcher srcMatcher = getSrcPattern().matcher(pathStrToResolve); + String parsedDestPath = getDstPath(); + int mappedCount = 0; + String resolvedPathStr = ""; + while (srcMatcher.find()) { + resolvedPathStr = pathStrToResolve.substring(0, srcMatcher.end()); + Map> varMap = getVarInDestPathMap(); + for (Map.Entry> entry : varMap.entrySet()) { + String regexGroupNameOrIndexStr = entry.getKey(); + Set groupRepresentationStrSetInDest = entry.getValue(); + parsedDestPath = replaceRegexCaptureGroupInPath( + parsedDestPath, srcMatcher, + regexGroupNameOrIndexStr, groupRepresentationStrSetInDest); + } + ++mappedCount; + } + if (0 == mappedCount) { + return null; + } + Path remainingPath = getRemainingPathStr(srcPath, resolvedPathStr); + for (RegexMountPointInterceptor interceptor : interceptorList) { + parsedDestPath = interceptor.interceptResolvedDestPathStr(parsedDestPath); + remainingPath = + interceptor.interceptRemainingPath(remainingPath); + } + InodeTree.ResolveResult resolveResult = inodeTree + .buildResolveResultForRegexMountPoint(InodeTree.ResultKind.EXTERNAL_DIR, + resolvedPathStr, parsedDestPath, remainingPath); + return resolveResult; + } + + private Path getRemainingPathStr( + String srcPath, + String resolvedPathStr) { + String remainingPathStr = srcPath.substring(resolvedPathStr.length()); + if (!remainingPathStr.startsWith("/")) { + remainingPathStr = "/" + remainingPathStr; + } + return new Path(remainingPathStr); + } + + private String getPathToResolve( + String srcPath, boolean resolveLastComponent) { + if (resolveLastComponent) { + return srcPath; + } + int lastSlashIndex = srcPath.lastIndexOf(SlashPath.toString()); + if (lastSlashIndex == -1) { + return null; + } + return srcPath.substring(0, lastSlashIndex); + } + + /** + * Use capture group named regexGroupNameOrIndexStr in mather to replace + * parsedDestPath. + * E.g. 
link: ^/user/(?\\w+) => s3://$user.apache.com/_${user} + * srcMatcher is from /user/hadoop. + * Then the params will be like following. + * parsedDestPath: s3://$user.apache.com/_${user}, + * regexGroupNameOrIndexStr: user + * groupRepresentationStrSetInDest: {user:$user; user:${user}} + * return value will be s3://hadoop.apache.com/_hadoop + * @param parsedDestPath + * @param srcMatcher + * @param regexGroupNameOrIndexStr + * @param groupRepresentationStrSetInDest + * @return return parsedDestPath while ${var},$var replaced or + * parsedDestPath nothing found. + */ + private String replaceRegexCaptureGroupInPath( + String parsedDestPath, + Matcher srcMatcher, + String regexGroupNameOrIndexStr, + Set groupRepresentationStrSetInDest) { + String groupValue = getRegexGroupValueFromMather( + srcMatcher, regexGroupNameOrIndexStr); + if (groupValue == null) { + return parsedDestPath; + } + for (String varName : groupRepresentationStrSetInDest) { + parsedDestPath = parsedDestPath.replace(varName, groupValue); + LOGGER.debug("parsedDestPath value is:" + parsedDestPath); + } + return parsedDestPath; + } + + /** + * Get matched capture group value from regex matched string. E.g. + * Regex: ^/user/(?\\w+), regexGroupNameOrIndexStr: userName + * then /user/hadoop should return hadoop while call + * getRegexGroupValueFromMather(matcher, usersName) + * or getRegexGroupValueFromMather(matcher, 1) + * + * @param srcMatcher - the matcher to be use + * @param regexGroupNameOrIndexStr - the regex group name or index + * @return - Null if no matched group named regexGroupNameOrIndexStr found. + */ + private String getRegexGroupValueFromMather( + Matcher srcMatcher, String regexGroupNameOrIndexStr) { + if (regexGroupNameOrIndexStr.matches("\\d+")) { + // group index + int groupIndex = Integer.parseUnsignedInt(regexGroupNameOrIndexStr); + if (groupIndex >= 0 && groupIndex <= srcMatcher.groupCount()) { + return srcMatcher.group(groupIndex); + } + } else { + // named group in regex + return srcMatcher.group(regexGroupNameOrIndexStr); + } + return null; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptor.java new file mode 100644 index 0000000000000..37f44b0a51579 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptor.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; + +/** + * RegexMountPointInterceptor is a mechanism provided to intercept + * src and resolved path before/after resolution. + */ +@InterfaceAudience.LimitedPrivate("Common") +@InterfaceStability.Unstable +interface RegexMountPointInterceptor { + + /** + * Initialize interceptor and throws IOException if needed. + * @throws IOException + */ + void initialize() throws IOException; + + /** + * Intercept source before resolution. + * @param source + * @return + */ + String interceptSource(String source); + + /** + * Intercept parsed dest path and return a new one. + * @return intercepted string + */ + String interceptResolvedDestPathStr(String parsedDestPathStr); + + /** + * Intercept remaining path. + * @return intercepted string + */ + Path interceptRemainingPath(Path remainingPath); + + /** + * Get interceptor type. + * @return + */ + RegexMountPointInterceptorType getType(); + + /** + * Serialize the interceptor to a string. + * @return + */ + String serializeToString(); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptorFactory.java new file mode 100644 index 0000000000000..fb564aa3a6e4d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptorFactory.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The interceptor factory used to create RegexMountPoint interceptors. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +final class RegexMountPointInterceptorFactory { + + private RegexMountPointInterceptorFactory() { + + } + + /** + * interceptorSettingsString string should be like ${type}:${string}, + * e.g. replaceresolveddstpath:word1,word2. + * + * @param interceptorSettingsString + * @return Return interceptor based on setting or null on bad/unknown config. + */ + public static RegexMountPointInterceptor create( + String interceptorSettingsString) { + int typeTagIndex = interceptorSettingsString + .indexOf(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP); + if (typeTagIndex == -1 || (typeTagIndex == ( + interceptorSettingsString.length() - 1))) { + return null; + } + String typeTag = interceptorSettingsString.substring(0, typeTagIndex).trim() + .toLowerCase(); + RegexMountPointInterceptorType interceptorType = + RegexMountPointInterceptorType.get(typeTag); + if (interceptorType == null) { + return null; + } + switch (interceptorType) { + case REPLACE_RESOLVED_DST_PATH: + RegexMountPointInterceptor interceptor = + RegexMountPointResolvedDstPathReplaceInterceptor + .deserializeFromString(interceptorSettingsString); + return interceptor; + default: + // impossible now + return null; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptorType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptorType.java new file mode 100644 index 0000000000000..ad953eba24ad9 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointInterceptorType.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.util.HashMap; +import java.util.Map; + +/** + * RegexMountPointInterceptorType. + */ +public enum RegexMountPointInterceptorType { + REPLACE_RESOLVED_DST_PATH("replaceresolveddstpath"); + + private final String configName; + private static final Map + INTERCEPTOR_TYPE_MAP + = new HashMap(); + + static { + for (RegexMountPointInterceptorType interceptorType + : RegexMountPointInterceptorType.values()) { + INTERCEPTOR_TYPE_MAP.put( + interceptorType.getConfigName(), interceptorType); + } + } + + RegexMountPointInterceptorType(String configName) { + this.configName = configName; + } + + public String getConfigName() { + return configName; + } + + public static RegexMountPointInterceptorType get(String configName) { + return INTERCEPTOR_TYPE_MAP.get(configName); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointResolvedDstPathReplaceInterceptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointResolvedDstPathReplaceInterceptor.java new file mode 100644 index 0000000000000..18490dc57c5ac --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/RegexMountPointResolvedDstPathReplaceInterceptor.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; + +import static org.apache.hadoop.fs.viewfs.RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH; + +/** + * Implementation of RegexMountPointResolvedDstPathReplaceInterceptor. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +class RegexMountPointResolvedDstPathReplaceInterceptor + implements RegexMountPointInterceptor { + + private String srcRegexString; + private String replaceString; + private Pattern srcRegexPattern; + + RegexMountPointResolvedDstPathReplaceInterceptor(String srcRegex, + String replaceString) { + this.srcRegexString = srcRegex; + this.replaceString = replaceString; + this.srcRegexPattern = null; + } + + public String getSrcRegexString() { + return srcRegexString; + } + + public String getReplaceString() { + return replaceString; + } + + public Pattern getSrcRegexPattern() { + return srcRegexPattern; + } + + @Override + public void initialize() throws IOException { + try { + srcRegexPattern = Pattern.compile(srcRegexString); + } catch (PatternSyntaxException ex) { + throw new IOException( + "Initialize interceptor failed, srcRegx:" + srcRegexString, ex); + } + } + + /** + * Source won't be changed in the interceptor. + * + * @return source param string passed in. + */ + @Override + public String interceptSource(String source) { + return source; + } + + /** + * Intercept resolved path, e.g. + * Mount point /^(\\w+)/, ${1}.hadoop.net + * If incoming path is /user1/home/tmp/job1, + * then the resolved path str will be user1. + * + * @return intercepted string + */ + @Override + public String interceptResolvedDestPathStr( + String parsedDestPathStr) { + Matcher matcher = srcRegexPattern.matcher(parsedDestPathStr); + return matcher.replaceAll(replaceString); + } + + /** + * The interceptRemainingPath will just return the remainingPath passed in. + * + */ + @Override + public Path interceptRemainingPath(Path remainingPath) { + return remainingPath; + } + + @Override + public RegexMountPointInterceptorType getType() { + return REPLACE_RESOLVED_DST_PATH; + } + + @Override + public String serializeToString() { + return REPLACE_RESOLVED_DST_PATH.getConfigName() + + RegexMountPoint.INTERCEPTOR_INTERNAL_SEP + srcRegexString + + RegexMountPoint.INTERCEPTOR_INTERNAL_SEP + replaceString; + } + + /** + * Create interceptor from config string. The string should be in + * replaceresolvedpath:wordToReplace:replaceString + * Note that we'll assume there's no ':' in the regex for the moment. + * + * @return Interceptor instance or null on bad config. + */ + public static RegexMountPointResolvedDstPathReplaceInterceptor + deserializeFromString(String serializedString) { + String[] strings = serializedString + .split(Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP)); + // We'll assume there's no ':' in the regex for the moment. 
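+    // For example (illustrative settings string, not taken from this patch):
+    // "replaceresolveddstpath:-:_" splits into
+    // {"replaceresolveddstpath", "-", "_"}, and the interceptor then replaces
+    // occurrences of "-" in the resolved destination path with "_".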
+ if (strings.length != 3) { + return null; + } + //The format should be like replaceresolvedpath:wordToReplace:replaceString + return new RegexMountPointResolvedDstPathReplaceInterceptor(strings[1], + strings[2]); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index faa374a39789b..473b51489a483 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -18,9 +18,13 @@ package org.apache.hadoop.fs.viewfs; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; -import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE; import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; +import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; import java.io.IOException; @@ -29,16 +33,17 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.Set; -import java.util.Map.Entry; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -96,20 +101,48 @@ static AccessControlException readOnlyMountTable(final String operation, return readOnlyMountTable(operation, p.toString()); } + /** + * Gets file system creator instance. + */ + protected FsGetter fsGetter() { + return new FsGetter(); + } + /** * Caching children filesystems. HADOOP-15565. 
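+   * The map is guarded by the ReentrantReadWriteLock below so that concurrent
+   * get() calls do not create duplicate child file systems.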
*/ static class InnerCache { private Map map = new HashMap<>(); + private FsGetter fsCreator; + private ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(); + + InnerCache(FsGetter fsCreator) { + this.fsCreator = fsCreator; + } FileSystem get(URI uri, Configuration config) throws IOException { Key key = new Key(uri); - if (map.get(key) == null) { - FileSystem fs = FileSystem.newInstance(uri, config); + FileSystem fs = null; + try { + rwLock.readLock().lock(); + fs = map.get(key); + if (fs != null) { + return fs; + } + } finally { + rwLock.readLock().unlock(); + } + try { + rwLock.writeLock().lock(); + fs = map.get(key); + if (fs != null) { + return fs; + } + fs = fsCreator.getNewInstance(uri, config); map.put(key, fs); return fs; - } else { - return map.get(key); + } finally { + rwLock.writeLock().unlock(); } } @@ -123,9 +156,13 @@ void closeAll() { } } - InnerCache unmodifiableCache() { - map = Collections.unmodifiableMap(map); - return this; + void clear() { + try { + rwLock.writeLock().lock(); + map.clear(); + } finally { + rwLock.writeLock().unlock(); + } } /** @@ -193,7 +230,7 @@ public URI[] getTargetFileSystemURIs() { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable - URI myUri; + private URI myUri; private Path workingDir; Configuration config; InodeTree fsState; // the fs state; ie the mount table @@ -241,6 +278,15 @@ public String getScheme() { return FsConstants.VIEWFS_SCHEME; } + /** + * Returns false as it does not support to add fallback link automatically on + * no mounts. + */ + boolean supportAutoAddingFallbackOnNoMounts() { + return false; + } + + /** * Called after a new FileSystem instance is constructed. * @param theUri a uri whose authority section names the host, port, etc. for @@ -255,35 +301,46 @@ public void initialize(final URI theUri, final Configuration conf) config = conf; enableInnerCache = config.getBoolean(CONFIG_VIEWFS_ENABLE_INNER_CACHE, CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT); - final InnerCache innerCache = new InnerCache(); + FsGetter fsGetter = fsGetter(); + final InnerCache innerCache = new InnerCache(fsGetter); // Now build client side view (i.e. client side mount table) from config. 
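+    // When CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME is enabled, a URI
+    // such as viewfs://cluster:8020/ (illustrative) uses only the host,
+    // "cluster", as the mount table name.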
final String authority = theUri.getAuthority(); + String tableName = authority; + if (theUri.getPort() != -1 && config + .getBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT)) { + tableName = theUri.getHost(); + } try { - myUri = new URI(FsConstants.VIEWFS_SCHEME, authority, "/", null, null); - fsState = new InodeTree(conf, authority) { - + myUri = new URI(getScheme(), authority, "/", null, null); + boolean initingUriAsFallbackOnNoMounts = + supportAutoAddingFallbackOnNoMounts(); + fsState = new InodeTree(conf, tableName, myUri, + initingUriAsFallbackOnNoMounts) { @Override protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException { - FileSystem fs; - if (enableInnerCache) { - fs = innerCache.get(uri, config); - } else { - fs = FileSystem.get(uri, config); - } - return new ChRootedFileSystem(fs, uri); + FileSystem fs; + if (enableInnerCache) { + fs = innerCache.get(uri, config); + } else { + fs = fsGetter.get(uri, config); + } + return new ChRootedFileSystem(fs, uri); } @Override protected FileSystem getTargetFileSystem(final INodeDir dir) - throws URISyntaxException { - return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config); + throws URISyntaxException { + return new InternalDirOfViewFs(dir, creationTime, ugi, myUri, config, + this); } @Override protected FileSystem getTargetFileSystem(final String settings, final URI[] uris) throws URISyntaxException, IOException { - return NflyFSystem.createFileSystem(uris, config, settings); + return NflyFSystem.createFileSystem(uris, config, settings, + fsGetter); } }; workingDir = this.getHomeDirectory(); @@ -298,7 +355,7 @@ protected FileSystem getTargetFileSystem(final String settings, // All fs instances are created and cached on startup. The cache is // readonly after the initialize() so the concurrent access of the cache // is safe. - cache = innerCache.unmodifiableCache(); + cache = innerCache; } } @@ -370,7 +427,7 @@ public FSDataOutputStream append(final Path f, final int bufferSize, fsState.resolve(getUriPath(f), true); return res.targetFileSystem.append(res.remainingPath, bufferSize, progress); } - + @Override public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet flags, int bufferSize, short replication, @@ -475,6 +532,14 @@ private static FileStatus wrapLocalFileStatus(FileStatus orig, : new ViewFsFileStatus(orig, qualified); } + /** + * {@inheritDoc} + * + * If the given path is a symlink(mount link), the path will be resolved to a + * target path and it will get the resolved path's FileStatus object. It will + * not be represented as a symlink and isDirectory API returns true if the + * resolved path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { @@ -492,6 +557,33 @@ public void access(Path path, FsAction mode) throws AccessControlException, res.targetFileSystem.access(res.remainingPath, mode); } + /** + * {@inheritDoc} + * + * Note: listStatus considers listing from fallbackLink if available. If the + * same directory path is present in configured mount path as well as in + * fallback fs, then only the fallback path will be listed in the returned + * result except for link. + * + * If any of the the immediate children of the given path f is a symlink(mount + * link), the returned FileStatus object of that children would be represented + * as a symlink. 
It will not be resolved to the target path and will not get + * the target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * This behavior can be changed by setting an advanced configuration + * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will + * be represented as non-symlinks and all the file/directory attributes like + * permissions, isDirectory etc will be assigned from it's resolved target + * directory/file. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link ViewFileSystem#getFileStatus(Path f)} + * + * Note: In ViewFileSystem, by default the mount links are represented as + * symlinks. + */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { @@ -578,18 +670,52 @@ public FSDataInputStream open(final Path f, final int bufferSize) @Override public boolean rename(final Path src, final Path dst) throws IOException { // passing resolveLastComponet as false to catch renaming a mount point to - // itself. We need to catch this as an internal operation and fail. - InodeTree.ResolveResult resSrc = - fsState.resolve(getUriPath(src), false); - + // itself. We need to catch this as an internal operation and fail if no + // fallback. + InodeTree.ResolveResult resSrc = + fsState.resolve(getUriPath(src), false); + if (resSrc.isInternalDir()) { - throw readOnlyMountTable("rename", src); + if (fsState.getRootFallbackLink() == null) { + // If fallback is null, we can't rename from src. + throw readOnlyMountTable("rename", src); + } + InodeTree.ResolveResult resSrcWithLastComp = + fsState.resolve(getUriPath(src), true); + if (resSrcWithLastComp.isInternalDir() || resSrcWithLastComp + .isLastInternalDirLink()) { + throw readOnlyMountTable("rename", src); + } else { + // This is fallback and let's set the src fs with this fallback + resSrc = resSrcWithLastComp; + } } - - InodeTree.ResolveResult resDst = - fsState.resolve(getUriPath(dst), false); + + InodeTree.ResolveResult resDst = + fsState.resolve(getUriPath(dst), false); + if (resDst.isInternalDir()) { - throw readOnlyMountTable("rename", dst); + if (fsState.getRootFallbackLink() == null) { + // If fallback is null, we can't rename to dst. + throw readOnlyMountTable("rename", dst); + } + // if the fallback exist, we may have chance to rename to fallback path + // where dst parent is matching to internalDir. + InodeTree.ResolveResult resDstWithLastComp = + fsState.resolve(getUriPath(dst), true); + if (resDstWithLastComp.isInternalDir()) { + // We need to get fallback here. If matching fallback path not exist, it + // will fail later. This is a very special case: Even though we are on + // internal directory, we should allow to rename, so that src files will + // moved under matching fallback dir. + resDst = new InodeTree.ResolveResult( + InodeTree.ResultKind.INTERNAL_DIR, + fsState.getRootFallbackLink().getTargetFileSystem(), "/", + new Path(resDstWithLastComp.resolvedPath), false); + } else { + // The link resolved to some target fs or fallback fs. 
+ resDst = resDstWithLastComp; + } } URI srcUri = resSrc.targetFileSystem.getUri(); @@ -867,6 +993,12 @@ public FileSystem[] getChildFileSystems() { FileSystem targetFs = mountPoint.target.targetFileSystem; children.addAll(Arrays.asList(targetFs.getChildFileSystems())); } + + if (fsState.isRootInternalDir() && fsState.getRootFallbackLink() != null) { + children.addAll(Arrays.asList( + fsState.getRootFallbackLink().targetFileSystem + .getChildFileSystems())); + } return children.toArray(new FileSystem[]{}); } @@ -1074,11 +1206,14 @@ static class InternalDirOfViewFs extends FileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; + private final boolean showMountLinksAsSymlinks; + private InodeTree fsState; public InternalDirOfViewFs(final InodeTree.INodeDir dir, final long cTime, final UserGroupInformation ugi, URI uri, - Configuration config) throws URISyntaxException { + Configuration config, InodeTree fsState) throws URISyntaxException { myUri = uri; + this.fsState = fsState; try { initialize(myUri, config); } catch (IOException e) { @@ -1087,6 +1222,9 @@ public InternalDirOfViewFs(final InodeTree.INodeDir dir, theInternalDir = dir; creationTime = cTime; this.ugi = ugi; + showMountLinksAsSymlinks = config + .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); } static private void checkPathIsSlash(final Path f) throws IOException { @@ -1123,7 +1261,41 @@ public FSDataOutputStream append(final Path f, final int bufferSize, public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, - final Progressable progress) throws AccessControlException { + final Progressable progress) throws IOException { + Preconditions.checkNotNull(f, "File cannot be null."); + if (InodeTree.SlashPath.equals(f)) { + throw new FileAlreadyExistsException( + "/ is not a file. The directory / already exist at: " + + theInternalDir.fullPath); + } + + if (this.fsState.getRootFallbackLink() != null) { + + if (theInternalDir.getChildren().containsKey(f.getName())) { + throw new FileAlreadyExistsException( + "A mount path(file/dir) already exist with the requested path: " + + theInternalDir.getChildren().get(f.getName()).fullPath); + } + + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leaf = f.getName(); + Path fileToCreate = new Path(parent, leaf); + + try { + return linkedFallbackFs + .create(fileToCreate, permission, overwrite, bufferSize, + replication, blockSize, progress); + } catch (IOException e) { + StringBuilder msg = + new StringBuilder("Failed to create file:").append(fileToCreate) + .append(" at fallback : ").append(linkedFallbackFs.getUri()); + LOG.error(msg.toString(), e); + throw e; + } + } throw readOnlyMountTable("create", f); } @@ -1145,6 +1317,23 @@ public boolean delete(final Path f) public BlockLocation[] getFileBlockLocations(final FileStatus fs, final long start, final long len) throws FileNotFoundException, IOException { + + // When application calls listFiles on internalDir, it would return + // RemoteIterator from InternalDirOfViewFs. If there is a fallBack, there + // is a chance of files exists under that internalDir in fallback. 
+ // Iterator#next will call getFileBlockLocations with that files. So, we + // should return getFileBlockLocations on fallback. See HDFS-15532. + if (!InodeTree.SlashPath.equals(fs.getPath()) && this.fsState + .getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + Path pathToFallbackFs = new Path(parent, fs.getPath().getName()); + return linkedFallbackFs + .getFileBlockLocations(pathToFallbackFs, start, len); + } + checkPathIsSlash(fs.getPath()); throw new FileNotFoundException("Path points to dir not a file"); } @@ -1161,7 +1350,6 @@ public FileStatus getFileStatus(Path f) throws IOException { checkPathIsSlash(f); return new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), - new Path(theInternalDir.fullPath).makeQualified( myUri, ROOT_PATH)); } @@ -1171,34 +1359,147 @@ public FileStatus getFileStatus(Path f) throws IOException { public FileStatus[] listStatus(Path f) throws AccessControlException, FileNotFoundException, IOException { checkPathIsSlash(f); - FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; + FileStatus[] fallbackStatuses = listStatusForFallbackLink(); + Set linkStatuses = new HashSet<>(); + Set internalDirStatuses = new HashSet<>(); int i = 0; for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { INode inode = iEntry.getValue(); + Path path = new Path(inode.fullPath).makeQualified(myUri, null); if (inode.isLink()) { INodeLink link = (INodeLink) inode; - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + if (showMountLinksAsSymlinks) { + // To maintain backward compatibility, with default option(showing + // mount links as symlinks), we will represent target link as + // symlink and rest other properties are belongs to mount link only. + linkStatuses.add( + new FileStatus(0, false, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), link.getTargetLink(), path)); + continue; + } + + // We will represent as non-symlinks. Here it will show target + // directory/file properties like permissions, isDirectory etc on + // mount path. The path will be a mount link path and isDirectory is + // true if target is dir, otherwise false. 
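+          // For example, a mount link /data pointing to hdfs://nn1/data
+          // (illustrative target URI) is listed as an entry named "data" that
+          // carries the target's length, permissions, owner and times.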
+ String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if ("".equals(linkedPath)) { + linkedPath = "/"; + } + try { + FileStatus status = + ((ChRootedFileSystem)link.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + linkStatuses.add( + new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), + status.getGroup(), null, path)); + } catch (FileNotFoundException ex) { + LOG.warn("Cannot get one of the children's(" + path + + ") target path(" + link.getTargetFileSystem().getUri() + + ") file status.", ex); + throw ex; + } } else { - result[i++] = new FileStatus(0, true, 0, 0, - creationTime, creationTime, PERMISSION_555, - ugi.getShortUserName(), ugi.getGroupNames()[0], - new Path(inode.fullPath).makeQualified( - myUri, null)); + internalDirStatuses.add( + new FileStatus(0, true, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), path)); + } + } + FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses + .toArray(new FileStatus[internalDirStatuses.size()]); + if (fallbackStatuses.length > 0) { + internalDirStatusesMergedWithFallBack = + merge(fallbackStatuses, internalDirStatusesMergedWithFallBack); + } + // Links will always have precedence than internalDir or fallback paths. + return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]), + internalDirStatusesMergedWithFallBack); + } + + private FileStatus[] merge(FileStatus[] toStatuses, + FileStatus[] fromStatuses) { + ArrayList result = new ArrayList<>(); + Set pathSet = new HashSet<>(); + for (FileStatus status : toStatuses) { + result.add(status); + pathSet.add(status.getPath().getName()); + } + for (FileStatus status : fromStatuses) { + if (!pathSet.contains(status.getPath().getName())) { + result.add(status); } } - return result; + return result.toArray(new FileStatus[result.size()]); + } + + private FileStatus[] listStatusForFallbackLink() throws IOException { + if (this.fsState.getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path p = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + if (theInternalDir.isRoot() || linkedFallbackFs.exists(p)) { + FileStatus[] statuses = linkedFallbackFs.listStatus(p); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + Path pathFromConfiguredFallbackRoot = + new Path(p, status.getPath().getName()); + status.setPath( + new Path(myUri.toString(), pathFromConfiguredFallbackRoot)); + } + return statuses; + } + } + return new FileStatus[0]; + } + + @Override + public ContentSummary getContentSummary(Path f) throws IOException { + long[] summary = {0, 0, 1}; + for (FileStatus status : listStatus(f)) { + Path targetPath = + Path.getPathWithoutSchemeAndAuthority(status.getPath()); + InodeTree.ResolveResult res = + fsState.resolve(targetPath.toString(), true); + ContentSummary child = + res.targetFileSystem.getContentSummary(res.remainingPath); + summary[0] += child.getLength(); + summary[1] += child.getFileCount(); + summary[2] += child.getDirectoryCount(); + } + return new ContentSummary.Builder() + .length(summary[0]) + .fileCount(summary[1]) + .directoryCount(summary[2]) + .build(); + } + + @Override + public FsStatus getStatus(Path p) throws IOException { + long[] summary = {0, 0, 0}; + for (FileStatus 
status : listStatus(p)) { + Path targetPath = + Path.getPathWithoutSchemeAndAuthority(status.getPath()); + InodeTree.ResolveResult res = + fsState.resolve(targetPath.toString(), true); + FsStatus child = res.targetFileSystem.getStatus(res.remainingPath); + summary[0] += child.getCapacity(); + summary[1] += child.getUsed(); + summary[2] += child.getRemaining(); + } + return new FsStatus(summary[0], summary[1], summary[2]); } @Override public boolean mkdirs(Path dir, FsPermission permission) - throws AccessControlException, FileAlreadyExistsException { + throws IOException { if (theInternalDir.isRoot() && dir == null) { throw new FileAlreadyExistsException("/ already exits"); } @@ -1207,12 +1508,36 @@ public boolean mkdirs(Path dir, FsPermission permission) dir.toString().substring(1))) { return true; // this is the stupid semantics of FileSystem } + + if (this.fsState.getRootFallbackLink() != null) { + FileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leafChild = (InodeTree.SlashPath.equals(dir)) ? + InodeTree.SlashPath.toString() : + dir.getName(); + Path dirToCreate = new Path(parent, leafChild); + + try { + return linkedFallbackFs.mkdirs(dirToCreate, permission); + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + StringBuilder msg = + new StringBuilder("Failed to create ").append(dirToCreate) + .append(" at fallback : ") + .append(linkedFallbackFs.getUri()); + LOG.debug(msg.toString(), e); + } + throw e; + } + } + throw readOnlyMountTable("mkdirs", dir); } @Override - public boolean mkdirs(Path dir) - throws AccessControlException, FileAlreadyExistsException { + public boolean mkdirs(Path dir) throws IOException { return mkdirs(dir, null); } @@ -1436,6 +1761,7 @@ public void close() throws IOException { super.close(); if (enableInnerCache && cache != null) { cache.closeAll(); + cache.clear(); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java new file mode 100644 index 0000000000000..5353e93b6f133 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java @@ -0,0 +1,357 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnsupportedFileSystemException; + +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; + +/****************************************************************************** + * This class is extended from the ViewFileSystem for the overloaded scheme + * file system. Mount link configurations and in-memory mount table + * building behaviors are inherited from ViewFileSystem. Unlike ViewFileSystem + * scheme (viewfs://), the users would be able to use any scheme. + * + * To use this class, the following configurations need to be added in + * core-site.xml file. + * 1) fs..impl + * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme + * 2) fs.viewfs.overload.scheme.target..impl + * = " + * + * Here can be any scheme, but with that scheme there should be a + * hadoop compatible file system available. Second configuration value should + * be the respective scheme's file system implementation class. + * Example: if scheme is configured with "hdfs", then the 2nd configuration + * class name will be org.apache.hadoop.hdfs.DistributedFileSystem. + * if scheme is configured with "s3a", then the 2nd configuration class name + * will be org.apache.hadoop.fs.s3a.S3AFileSystem. + * + * Use Case 1: + * =========== + * If users want some of their existing cluster (hdfs://Cluster) + * data to mount with other hdfs and object store clusters(hdfs://NN1, + * o3fs://bucket1.volume1/, s3a://bucket1/) + * + * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user + * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/ + * + * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA + * Op2: Create file hdfs://Cluster/data/datafile will go to + * o3fs://bucket1.volume1/data/datafile + * Op3: Create file hdfs://Cluster/backup/data.zip will go to + * s3a://bucket1/backup/data.zip + * + * Use Case 2: + * =========== + * If users want some of their existing cluster (s3a://bucketA/) + * data to mount with other hdfs and object store clusters + * (hdfs://NN1, o3fs://bucket1.volume1/) + * + * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user + * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data + * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/ + * + * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA + * Op2: Create file s3a://bucketA/data/datafile will go to + * o3fs://bucket1.volume1/data/datafile + * Op3: Create file s3a://bucketA/salesDB/dbfile will go to + * s3a://bucketA/salesDB/dbfile + * + * Note: + * (1) In ViewFileSystemOverloadScheme, by default the mount links will be + * represented as non-symlinks. If you want to change this behavior, please see + * {@link ViewFileSystem#listStatus(Path)} + * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname will + * be considered as the mount table name. 
When the passed uri has hostname:port, + * it will simply ignore the port number and only hostname will be considered as + * the mount table name. + * (3) If there are no mount links configured with the initializing uri's + * hostname as the mount table name, then it will automatically consider the + * current uri as fallback( ex: fs.viewfs.mounttable..linkFallBack) + * target fs uri. + *****************************************************************************/ +@InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" }) +@InterfaceStability.Evolving +public class ViewFileSystemOverloadScheme extends ViewFileSystem { + private URI myUri; + private boolean supportAutoAddingFallbackOnNoMounts = true; + public ViewFileSystemOverloadScheme() throws IOException { + super(); + } + + @Override + public String getScheme() { + return myUri.getScheme(); + } + + /** + * By default returns false as ViewFileSystemOverloadScheme supports auto + * adding fallback on no mounts. + */ + public boolean supportAutoAddingFallbackOnNoMounts() { + return this.supportAutoAddingFallbackOnNoMounts; + } + + /** + * Sets whether to add fallback automatically when no mount points found. + */ + public void setSupportAutoAddingFallbackOnNoMounts( + boolean addAutoFallbackOnNoMounts) { + this.supportAutoAddingFallbackOnNoMounts = addAutoFallbackOnNoMounts; + } + + @Override + public void initialize(URI theUri, Configuration conf) throws IOException { + this.myUri = theUri; + if (LOG.isDebugEnabled()) { + LOG.debug("Initializing the ViewFileSystemOverloadScheme with the uri: " + + theUri); + } + String mountTableConfigPath = + conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH); + /* The default value to false in ViewFSOverloadScheme */ + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + conf.getBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + false)); + /* the default value to true in ViewFSOverloadScheme */ + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + conf.getBoolean(Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + true)); + if (null != mountTableConfigPath) { + MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); + loader.load(mountTableConfigPath, conf); + } else { + // TODO: Should we fail here.? + if (LOG.isDebugEnabled()) { + LOG.debug( + "Missing configuration for fs.viewfs.mounttable.path. Proceeding" + + "with core-site.xml mount-table information if avaialable."); + } + } + super.initialize(theUri, conf); + } + + /** + * This method is overridden because in ViewFileSystemOverloadScheme if + * overloaded scheme matches with mounted target fs scheme, file system + * should be created without going into fs..impl based resolution. + * Otherwise it will end up in an infinite loop as the target will be + * resolved again to ViewFileSystemOverloadScheme as fs..impl points + * to ViewFileSystemOverloadScheme. So, below method will initialize the + * fs.viewfs.overload.scheme.target..impl. Other schemes can + * follow fs.newInstance + */ + @Override + protected FsGetter fsGetter() { + return new ChildFsGetter(getScheme()); + } + + /** + * This class checks whether the rooScheme is same as URI scheme. If both are + * same, then it will initialize file systems by using the configured + * fs.viewfs.overload.scheme.target..impl class. 
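+   * For example (illustrative URIs), with root scheme "hdfs" a target like
+   * hdfs://nn1/user is created through the class configured in
+   * fs.viewfs.overload.scheme.target.hdfs.impl, while a target such as
+   * s3a://bucket/data goes through the regular FileSystem factory.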
+ */ + static class ChildFsGetter extends FsGetter { + + private final String rootScheme; + + ChildFsGetter(String rootScheme) { + this.rootScheme = rootScheme; + } + + @Override + public FileSystem getNewInstance(URI uri, Configuration conf) + throws IOException { + if (uri.getScheme().equals(this.rootScheme)) { + if (LOG.isDebugEnabled()) { + LOG.debug( + "The file system initialized uri scheme is matching with the " + + "given target uri scheme. The target uri is: " + uri); + } + /* + * Avoid looping when target fs scheme is matching to overloaded scheme. + */ + return createFileSystem(uri, conf); + } else { + return FileSystem.newInstance(uri, conf); + } + } + + /** + * When ViewFileSystemOverloadScheme scheme and target uri scheme are + * matching, it will not take advantage of FileSystem cache as it will + * create instance directly. For caching needs please set + * "fs.viewfs.enable.inner.cache" to true. + */ + @Override + public FileSystem get(URI uri, Configuration conf) throws IOException { + if (uri.getScheme().equals(this.rootScheme)) { + // Avoid looping when target fs scheme is matching to overloaded + // scheme. + if (LOG.isDebugEnabled()) { + LOG.debug( + "The file system initialized uri scheme is matching with the " + + "given target uri scheme. So, the target file system " + + "instances will not be cached. To cache fs instances, " + + "please set fs.viewfs.enable.inner.cache to true. " + + "The target uri is: " + uri); + } + return createFileSystem(uri, conf); + } else { + return FileSystem.get(uri, conf); + } + } + + private FileSystem createFileSystem(URI uri, Configuration conf) + throws IOException { + final String fsImplConf = String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + uri.getScheme()); + Class clazz = conf.getClass(fsImplConf, null); + if (clazz == null) { + throw new UnsupportedFileSystemException( + String.format("%s=null: %s: %s", fsImplConf, + "No overload scheme fs configured", uri.getScheme())); + } + FileSystem fs = (FileSystem) newInstance(clazz, uri, conf); + fs.initialize(uri, conf); + return fs; + } + + private T newInstance(Class theClass, URI uri, Configuration conf) { + T result; + try { + Constructor meth = theClass.getConstructor(); + meth.setAccessible(true); + result = meth.newInstance(); + } catch (InvocationTargetException e) { + Throwable cause = e.getCause(); + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw new RuntimeException(cause); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return result; + } + + } + + /** + * This is an admin only API to give access to its child raw file system, if + * the path is link. If the given path is an internal directory(path is from + * mount paths tree), it will initialize the file system of given path uri + * directly. If path cannot be resolved to any internal directory or link, it + * will throw NotInMountpointException. Please note, this API will not return + * chrooted file system. Instead, this API will get actual raw file system + * instances. + * + * @param path - fs uri path + * @param conf - configuration + * @throws IOException + */ + public FileSystem getRawFileSystem(Path path, Configuration conf) + throws IOException { + InodeTree.ResolveResult res; + try { + res = fsState.resolve(getUriPath(path), true); + return res.isInternalDir() ? 
fsGetter().get(path.toUri(), conf) + : ((ChRootedFileSystem) res.targetFileSystem).getMyFs(); + } catch (FileNotFoundException e) { + // No link configured with passed path. + throw new NotInMountpointException(path, + "No link found for the given path."); + } + } + + /** + * Gets the mount path info, which contains the target file system and + * remaining path to pass to the target file system. + */ + public MountPathInfo getMountPathInfo(Path path, + Configuration conf) throws IOException { + InodeTree.ResolveResult res; + try { + res = fsState.resolve(getUriPath(path), true); + FileSystem fs = res.isInternalDir() ? + (fsState.getRootFallbackLink() != null ? + ((ChRootedFileSystem) fsState + .getRootFallbackLink().targetFileSystem).getMyFs() : + fsGetter().get(path.toUri(), conf)) : + ((ChRootedFileSystem) res.targetFileSystem).getMyFs(); + return new MountPathInfo(res.remainingPath, res.resolvedPath, + fs); + } catch (FileNotFoundException e) { + // No link configured with passed path. + throw new NotInMountpointException(path, + "No link found for the given path."); + } + } + + /** + * A class to maintain the target file system and a path to pass to the target + * file system. + */ + public static class MountPathInfo { + private Path pathOnTarget; + private T targetFs; + + public MountPathInfo(Path pathOnTarget, String resolvedPath, T targetFs) { + this.pathOnTarget = pathOnTarget; + this.targetFs = targetFs; + } + + public Path getPathOnTarget() { + return this.pathOnTarget; + } + + public T getTargetFs() { + return this.targetFs; + } + } + + /** + * @return Gets the fallback file system configured. Usually, this will be the + * default cluster. + */ + public FileSystem getFallbackFileSystem() { + if (fsState.getRootFallbackLink() == null) { + return null; + } + return ((ChRootedFileSystem) fsState.getRootFallbackLink().targetFileSystem) + .getMyFs(); + } + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java index c8a1d78cffd46..f486a10b4c8f9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java @@ -51,6 +51,17 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) { return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME); } + /** + * Check if the FileSystem is a ViewFileSystemOverloadScheme. + * + * @param fileSystem + * @return true if the fileSystem is ViewFileSystemOverloadScheme + */ + public static boolean isViewFileSystemOverloadScheme( + final FileSystem fileSystem) { + return fileSystem instanceof ViewFileSystemOverloadScheme; + } + /** * Get FsStatus for all ViewFsMountPoints matching path for the given * ViewFileSystem. 
@@ -93,7 +104,8 @@ public static boolean isViewFileSystem(final FileSystem fileSystem) { */ public static Map getStatus( FileSystem fileSystem, Path path) throws IOException { - if (!isViewFileSystem(fileSystem)) { + if (!(isViewFileSystem(fileSystem) + || isViewFileSystemOverloadScheme(fileSystem))) { throw new UnsupportedFileSystemException("FileSystem '" + fileSystem.getUri() + "'is not a ViewFileSystem."); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 2c8c1a538e433..a7d56fa56f443 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.fs.viewfs; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; @@ -25,10 +27,14 @@ import java.net.URISyntaxException; import java.util.ArrayList; import java.util.EnumSet; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -40,6 +46,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; @@ -65,7 +72,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * ViewFs (extends the AbstractFileSystem interface) implements a client-side @@ -152,6 +160,7 @@ @InterfaceAudience.Public @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class ViewFs extends AbstractFileSystem { + static final Logger LOG = LoggerFactory.getLogger(ViewFs.class); final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final Configuration config; @@ -159,6 +168,7 @@ public class ViewFs extends AbstractFileSystem { Path homeDir = null; private ViewFileSystem.RenameStrategy renameStrategy = ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT; + private static boolean showMountLinksAsSymlinks = true; static AccessControlException readOnlyMountTable(final String operation, final String p) { @@ -186,7 +196,16 @@ URI[] getTargets() { return targets; } } - + + /** + * Returns the ViewFileSystem type. 
+ * + * @return viewfs + */ + String getType() { + return FsConstants.VIEWFS_TYPE; + } + public ViewFs(final Configuration conf) throws IOException, URISyntaxException { this(FsConstants.VIEWFS_URI, conf); @@ -207,9 +226,15 @@ public ViewFs(final Configuration conf) throws IOException, creationTime = Time.now(); ugi = UserGroupInformation.getCurrentUser(); config = conf; + showMountLinksAsSymlinks = config + .getBoolean(CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, + CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT); // Now build client side view (i.e. client side mount table) from config. String authority = theUri.getAuthority(); - fsState = new InodeTree(conf, authority) { + boolean initingUriAsFallbackOnNoMounts = + !FsConstants.VIEWFS_TYPE.equals(getType()); + fsState = new InodeTree(conf, authority, theUri, + initingUriAsFallbackOnNoMounts) { @Override protected AbstractFileSystem getTargetFileSystem(final URI uri) @@ -226,7 +251,8 @@ protected AbstractFileSystem getTargetFileSystem(final URI uri) @Override protected AbstractFileSystem getTargetFileSystem( final INodeDir dir) throws URISyntaxException { - return new InternalDirOfViewFs(dir, creationTime, ugi, getUri()); + return new InternalDirOfViewFs(dir, creationTime, ugi, getUri(), this, + config); } @Override @@ -349,6 +375,14 @@ public FileChecksum getFileChecksum(final Path f) return res.targetFileSystem.getFileChecksum(res.remainingPath); } + /** + * {@inheritDoc} + * + * If the given path is a symlink(mount link), the path will be resolved to a + * target path and it will get the resolved path's FileStatus object. It will + * not be represented as a symlink and isDirectory API returns true if the + * resolved path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -434,6 +468,32 @@ public LocatedFileStatus getViewFsFileStatus(LocatedFileStatus stat, }; } + /** + * {@inheritDoc} + * + * Note: listStatus considers listing from fallbackLink if available. If the + * same directory path is present in configured mount path as well as in + * fallback fs, then only the fallback path will be listed in the returned + * result except for link. + * + * If any of the the immediate children of the given path f is a symlink(mount + * link), the returned FileStatus object of that children would be represented + * as a symlink. It will not be resolved to the target path and will not get + * the target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * This behavior can be changed by setting an advanced configuration + * fs.viewfs.mount.links.as.symlinks to false. In this case, mount points will + * be represented as non-symlinks and all the file/directory attributes like + * permissions, isDirectory etc will be assigned from it's resolved target + * directory/file. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link ViewFs#getFileStatus(Path f)} + * + * Note: In ViewFs, by default the mount links are represented as symlinks. 
+ */ @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -488,23 +548,60 @@ public boolean truncate(final Path f, final long newLength) public void renameInternal(final Path src, final Path dst, final boolean overwrite) throws IOException, UnresolvedLinkException { // passing resolveLastComponet as false to catch renaming a mount point - // itself we need to catch this as an internal operation and fail. - InodeTree.ResolveResult resSrc = - fsState.resolve(getUriPath(src), false); - + // itself we need to catch this as an internal operation and fail if no + // fallback. + InodeTree.ResolveResult resSrc = + fsState.resolve(getUriPath(src), false); + if (resSrc.isInternalDir()) { - throw new AccessControlException( - "Cannot Rename within internal dirs of mount table: src=" + src - + " is readOnly"); + if (fsState.getRootFallbackLink() == null) { + // If fallback is null, we can't rename from src. + throw new AccessControlException( + "Cannot Rename within internal dirs of mount table: src=" + src + + " is readOnly"); + } + InodeTree.ResolveResult resSrcWithLastComp = + fsState.resolve(getUriPath(src), true); + if (resSrcWithLastComp.isInternalDir() || resSrcWithLastComp + .isLastInternalDirLink()) { + throw new AccessControlException( + "Cannot Rename within internal dirs of mount table: src=" + src + + " is readOnly"); + } else { + // This is fallback and let's set the src fs with this fallback + resSrc = resSrcWithLastComp; + } } InodeTree.ResolveResult resDst = - fsState.resolve(getUriPath(dst), false); + fsState.resolve(getUriPath(dst), false); + if (resDst.isInternalDir()) { - throw new AccessControlException( - "Cannot Rename within internal dirs of mount table: dest=" + dst - + " is readOnly"); + if (fsState.getRootFallbackLink() == null) { + // If fallback is null, we can't rename to dst. + throw new AccessControlException( + "Cannot Rename within internal dirs of mount table: dest=" + dst + + " is readOnly"); + } + // if the fallback exist, we may have chance to rename to fallback path + // where dst parent is matching to internalDir. + InodeTree.ResolveResult resDstWithLastComp = + fsState.resolve(getUriPath(dst), true); + if (resDstWithLastComp.isInternalDir()) { + // We need to get fallback here. If matching fallback path not exist, it + // will fail later. This is a very special case: Even though we are on + // internal directory, we should allow to rename, so that src files will + // moved under matching fallback dir. + resDst = new InodeTree.ResolveResult( + InodeTree.ResultKind.INTERNAL_DIR, + fsState.getRootFallbackLink().getTargetFileSystem(), "/", + new Path(resDstWithLastComp.resolvedPath), false); + } else { + // The link resolved to some target fs or fallback fs. 
+ resDst = resDstWithLastComp; + } } + //Alternate 1: renames within same file system URI srcUri = resSrc.targetFileSystem.getUri(); URI dstUri = resDst.targetFileSystem.getUri(); @@ -841,15 +938,20 @@ static class InternalDirOfViewFs extends AbstractFileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; // the URI of the outer ViewFs - + private InodeTree fsState; + private Configuration conf; + public InternalDirOfViewFs(final InodeTree.INodeDir dir, - final long cTime, final UserGroupInformation ugi, final URI uri) + final long cTime, final UserGroupInformation ugi, final URI uri, + InodeTree fsState, Configuration conf) throws URISyntaxException { super(FsConstants.VIEWFS_URI, FsConstants.VIEWFS_SCHEME, false, -1); theInternalDir = dir; creationTime = cTime; this.ugi = ugi; myUri = uri; + this.fsState = fsState; + this.conf = conf; } static private void checkPathIsSlash(final Path f) throws IOException { @@ -868,6 +970,41 @@ public FSDataOutputStream createInternal(final Path f, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException { + Preconditions.checkNotNull(f, "File cannot be null."); + if (InodeTree.SlashPath.equals(f)) { + throw new FileAlreadyExistsException( + "/ is not a file. The directory / already exist at: " + + theInternalDir.fullPath); + } + + if (this.fsState.getRootFallbackLink() != null) { + if (theInternalDir.getChildren().containsKey(f.getName())) { + throw new FileAlreadyExistsException( + "A mount path(file/dir) already exist with the requested path: " + + theInternalDir.getChildren().get(f.getName()).fullPath); + } + + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leaf = f.getName(); + Path fileToCreate = new Path(parent, leaf); + + try { + return linkedFallbackFs + .createInternal(fileToCreate, flag, absolutePermission, + bufferSize, replication, blockSize, progress, checksumOpt, + true); + } catch (IOException e) { + StringBuilder msg = + new StringBuilder("Failed to create file:").append(fileToCreate) + .append(" at fallback : ").append(linkedFallbackFs.getUri()); + LOG.error(msg.toString(), e); + throw e; + } + } + throw readOnlyMountTable("create", f); } @@ -881,6 +1018,21 @@ public boolean delete(final Path f, final boolean recursive) @Override public BlockLocation[] getFileBlockLocations(final Path f, final long start, final long len) throws FileNotFoundException, IOException { + // When application calls listFiles on internalDir, it would return + // RemoteIterator from InternalDirOfViewFs. If there is a fallBack, there + // is a chance of files exists under that internalDir in fallback. + // Iterator#next will call getFileBlockLocations with that files. So, we + // should return getFileBlockLocations on fallback. See HDFS-15532. 
+ if (!InodeTree.SlashPath.equals(f) && this.fsState + .getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + Path pathToFallbackFs = new Path(parent, f.getName()); + return linkedFallbackFs + .getFileBlockLocations(pathToFallbackFs, start, len); + } checkPathIsSlash(f); throw new FileNotFoundException("Path points to dir not a file"); } @@ -915,11 +1067,25 @@ public FileStatus getFileLinkStatus(final Path f) if (inode.isLink()) { INodeLink inodelink = (INodeLink) inode; - result = new FileStatus(0, false, 0, 0, creationTime, creationTime, + try { + String linkedPath = inodelink.getTargetFileSystem() + .getUri().getPath(); + FileStatus status = ((ChRootedFs)inodelink.getTargetFileSystem()) + .getMyFs().getFileStatus(new Path(linkedPath)); + result = new FileStatus(status.getLen(), false, + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), status.getGroup(), + inodelink.getTargetLink(), + new Path(inode.fullPath).makeQualified( + myUri, null)); + } catch (FileNotFoundException ex) { + result = new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), inodelink.getTargetLink(), new Path(inode.fullPath).makeQualified( myUri, null)); + } } else { result = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), @@ -950,45 +1116,154 @@ public int getUriDefaultPort() { return -1; } + /** + * {@inheritDoc} + * + * Note: listStatus on root("/") considers listing from fallbackLink if + * available. If the same directory name is present in configured mount + * path as well as in fallback link, then only the configured mount path + * will be listed in the returned result. + */ @Override - public FileStatus[] listStatus(final Path f) throws AccessControlException, - IOException { + public FileStatus[] listStatus(final Path f) throws IOException { checkPathIsSlash(f); - FileStatus[] result = new FileStatus[theInternalDir.getChildren().size()]; + FileStatus[] fallbackStatuses = listStatusForFallbackLink(); + Set linkStatuses = new HashSet<>(); + Set internalDirStatuses = new HashSet<>(); int i = 0; for (Entry> iEntry : theInternalDir.getChildren().entrySet()) { INode inode = iEntry.getValue(); - - + Path path = new Path(inode.fullPath).makeQualified(myUri, null); if (inode.isLink()) { INodeLink link = (INodeLink) inode; - result[i++] = new FileStatus(0, false, 0, 0, - creationTime, creationTime, - PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(), - link.getTargetLink(), - new Path(inode.fullPath).makeQualified( - myUri, null)); + if (showMountLinksAsSymlinks) { + // To maintain backward compatibility, with default option(showing + // mount links as symlinks), we will represent target link as + // symlink and rest other properties are belongs to mount link only. + linkStatuses.add( + new FileStatus(0, false, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), link.getTargetLink(), path)); + continue; + } + + // We will represent as non-symlinks. Here it will show target + // directory/file properties like permissions, isDirectory etc on + // mount path. 
The path will be a mount link path and isDirectory is + // true if target is dir, otherwise false. + String linkedPath = link.getTargetFileSystem().getUri().getPath(); + if ("".equals(linkedPath)) { + linkedPath = "/"; + } + try { + FileStatus status = + ((ChRootedFs) link.getTargetFileSystem()).getMyFs() + .getFileStatus(new Path(linkedPath)); + linkStatuses.add( + new FileStatus(status.getLen(), status.isDirectory(), + status.getReplication(), status.getBlockSize(), + status.getModificationTime(), status.getAccessTime(), + status.getPermission(), status.getOwner(), + status.getGroup(), null, path)); + } catch (FileNotFoundException ex) { + LOG.warn("Cannot get one of the children's(" + path + + ") target path(" + link.getTargetFileSystem().getUri() + + ") file status.", ex); + throw ex; + } } else { - result[i++] = new FileStatus(0, true, 0, 0, - creationTime, creationTime, - PERMISSION_555, ugi.getShortUserName(), ugi.getGroupNames()[0], - new Path(inode.fullPath).makeQualified( - myUri, null)); + internalDirStatuses.add( + new FileStatus(0, true, 0, 0, creationTime, creationTime, + PERMISSION_555, ugi.getShortUserName(), + ugi.getPrimaryGroupName(), path)); } } - return result; + + FileStatus[] internalDirStatusesMergedWithFallBack = internalDirStatuses + .toArray(new FileStatus[internalDirStatuses.size()]); + if (fallbackStatuses.length > 0) { + internalDirStatusesMergedWithFallBack = + merge(fallbackStatuses, internalDirStatusesMergedWithFallBack); + } + + // Links will always have precedence than internalDir or fallback paths. + return merge(linkStatuses.toArray(new FileStatus[linkStatuses.size()]), + internalDirStatusesMergedWithFallBack); + } + + private FileStatus[] merge(FileStatus[] toStatuses, + FileStatus[] fromStatuses) { + ArrayList result = new ArrayList<>(); + Set pathSet = new HashSet<>(); + for (FileStatus status : toStatuses) { + result.add(status); + pathSet.add(status.getPath().getName()); + } + for (FileStatus status : fromStatuses) { + if (!pathSet.contains(status.getPath().getName())) { + result.add(status); + } + } + return result.toArray(new FileStatus[result.size()]); + } + + private FileStatus[] listStatusForFallbackLink() throws IOException { + if (fsState.getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + fsState.getRootFallbackLink().getTargetFileSystem(); + Path p = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + if (theInternalDir.isRoot() || FileContext + .getFileContext(linkedFallbackFs, conf).util().exists(p)) { + // Fallback link is only applicable for root + FileStatus[] statuses = linkedFallbackFs.listStatus(p); + for (FileStatus status : statuses) { + // Fix the path back to viewfs scheme + Path pathFromConfiguredFallbackRoot = + new Path(p, status.getPath().getName()); + status.setPath( + new Path(myUri.toString(), pathFromConfiguredFallbackRoot)); + } + return statuses; + } + } + return new FileStatus[0]; } @Override public void mkdir(final Path dir, final FsPermission permission, - final boolean createParent) throws AccessControlException, - FileAlreadyExistsException { + final boolean createParent) throws IOException { if (theInternalDir.isRoot() && dir == null) { throw new FileAlreadyExistsException("/ already exits"); } + + if (this.fsState.getRootFallbackLink() != null) { + AbstractFileSystem linkedFallbackFs = + this.fsState.getRootFallbackLink().getTargetFileSystem(); + Path parent = Path.getPathWithoutSchemeAndAuthority( + new Path(theInternalDir.fullPath)); + String leafChild = 
(InodeTree.SlashPath.equals(dir)) ? + InodeTree.SlashPath.toString() : + dir.getName(); + Path dirToCreate = new Path(parent, leafChild); + try { + // We are here because, the parent dir already exist in the mount + // table internal tree. So, let's create parent always in fallback. + linkedFallbackFs.mkdir(dirToCreate, permission, true); + return; + } catch (IOException e) { + if (LOG.isDebugEnabled()) { + StringBuilder msg = new StringBuilder("Failed to create {}") + .append(" at fallback fs : {}"); + LOG.debug(msg.toString(), dirToCreate, linkedFallbackFs.getUri()); + } + throw e; + } + } + throw readOnlyMountTable("mkdir", dir); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java new file mode 100644 index 0000000000000..89986d0e5ef69 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/package-info.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * ViewFileSystem and ViewFileSystemOverloadScheme classes. 
+ */ +@InterfaceAudience.LimitedPrivate({"MapReduce", "HBase", "Hive" }) +@InterfaceStability.Stable +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java index 828a17bcb972e..08f6df5a78661 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java @@ -43,8 +43,8 @@ import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.KeeperException.Code; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java index 4fc52d557cf9d..e7ed7304988cb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; import org.apache.hadoop.ipc.RPC; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -213,7 +213,7 @@ public void failover(HAServiceTarget fromSvc, // Fence fromSvc if it's required or forced by the user if (tryFence) { - if (!fromSvc.getFencer().fence(fromSvc)) { + if (!fromSvc.getFencer().fence(fromSvc, toSvc)) { throw new FailoverFailedException("Unable to fence " + fromSvc + ". 
Fencing failed."); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java index 0950ea7e01c57..b23c8555df72a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java @@ -39,7 +39,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java index 9d5c8e7b7ea3b..2e6b1fe113479 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java @@ -31,7 +31,7 @@ import org.apache.hadoop.ha.protocolPB.ZKFCProtocolClientSideTranslatorPB; import org.apache.hadoop.net.NetUtils; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Represents a target of the client side HA administration commands. @@ -44,6 +44,12 @@ public abstract class HAServiceTarget { private static final String PORT_SUBST_KEY = "port"; private static final String ADDRESS_SUBST_KEY = "address"; + /** + * The HAState this service target is intended to be after transition + * is complete. + */ + private HAServiceProtocol.HAServiceState transitionTargetHAStatus; + /** * @return the IPC address of the target node. */ @@ -93,6 +99,15 @@ public HAServiceProtocol getProxy(Configuration conf, int timeoutMs) return getProxyForAddress(conf, timeoutMs, getAddress()); } + public void setTransitionTargetHAStatus( + HAServiceProtocol.HAServiceState status) { + this.transitionTargetHAStatus = status; + } + + public HAServiceProtocol.HAServiceState getTransitionTargetHAStatus() { + return this.transitionTargetHAStatus; + } + /** * Returns a proxy to connect to the target HA service for health monitoring. 
* If {@link #getHealthMonitorAddress()} is implemented to return a non-null diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java index 16c30752edc20..732058649bc50 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.util.Daemon; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java index 64e7315130257..7f4a0790a3bc1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java @@ -27,8 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,15 +89,32 @@ public static NodeFencer create(Configuration conf, String confKey) } public boolean fence(HAServiceTarget fromSvc) { + return fence(fromSvc, null); + } + + public boolean fence(HAServiceTarget fromSvc, HAServiceTarget toSvc) { LOG.info("====== Beginning Service Fencing Process... ======"); int i = 0; for (FenceMethodWithArg method : methods) { LOG.info("Trying method " + (++i) + "/" + methods.size() +": " + method); try { - if (method.method.tryFence(fromSvc, method.arg)) { - LOG.info("====== Fencing successful by method " + method + " ======"); - return true; + // only true when target node is given, AND fencing on it failed + boolean toSvcFencingFailed = false; + // if target is given, try to fence on target first. Only if fencing + // on target succeeded, do fencing on source node. 
+ if (toSvc != null) { + toSvcFencingFailed = !method.method.tryFence(toSvc, method.arg); + } + if (toSvcFencingFailed) { + LOG.error("====== Fencing on target failed, skipping fencing " + + "on source ======"); + } else { + if (method.method.tryFence(fromSvc, method.arg)) { + LOG.info("====== Fencing successful by method " + + method + " ======"); + return true; + } } } catch (BadFencingConfigurationException e) { LOG.error("Fencing method " + method + " misconfigured", e); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java index 7e4a88f729fad..3ae8394b62342 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java @@ -19,11 +19,12 @@ import java.io.IOException; import java.lang.reflect.Field; +import java.util.Arrays; import java.util.Map; import org.apache.hadoop.conf.Configured; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Shell; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,6 +61,11 @@ public class ShellCommandFencer /** Prefix for target parameters added to the environment */ private static final String TARGET_PREFIX = "target_"; + /** Prefix for source parameters added to the environment */ + private static final String SOURCE_PREFIX = "source_"; + + private static final String ARG_DELIMITER = ","; + @VisibleForTesting static Logger LOG = LoggerFactory.getLogger(ShellCommandFencer.class); @@ -73,8 +79,9 @@ public void checkArgs(String args) throws BadFencingConfigurationException { } @Override - public boolean tryFence(HAServiceTarget target, String cmd) { + public boolean tryFence(HAServiceTarget target, String args) { ProcessBuilder builder; + String cmd = parseArgs(target.getTransitionTargetHAStatus(), args); if (!Shell.WINDOWS) { builder = new ProcessBuilder("bash", "-e", "-c", cmd); @@ -127,6 +134,28 @@ public boolean tryFence(HAServiceTarget target, String cmd) { return rc == 0; } + private String parseArgs(HAServiceProtocol.HAServiceState state, + String cmd) { + String[] args = cmd.split(ARG_DELIMITER); + if (args.length == 1) { + // only one command is given, assuming both src and dst + // will execute the same command/script. + return args[0]; + } + if (args.length > 2) { + throw new IllegalArgumentException("Expecting arguments size of at most " + + "two, getting " + Arrays.asList(args)); + } + if (HAServiceProtocol.HAServiceState.ACTIVE.equals(state)) { + return args[0]; + } else if (HAServiceProtocol.HAServiceState.STANDBY.equals(state)) { + return args[1]; + } else { + throw new IllegalArgumentException( + "Unexpected HA service state:" + state); + } + } + /** * Abbreviate a string by putting '...' in the middle of it, * in an attempt to keep logs from getting too messy. 
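Reviewer note: the parseArgs helper added to ShellCommandFencer above splits the configured fencing command on a comma and picks the first entry when the transition target state is ACTIVE and the second when it is STANDBY. A minimal standalone sketch of that selection rule; the class and enum names are illustrative stand-ins (the real code uses HAServiceProtocol.HAServiceState), not the patch itself:

```java
import java.util.Arrays;

/** Sketch of the comma-delimited command selection added to ShellCommandFencer. */
public final class FenceCommandSelector {

  /** Illustrative stand-in for HAServiceProtocol.HAServiceState. */
  enum State { ACTIVE, STANDBY }

  private static final String ARG_DELIMITER = ",";

  static String select(State state, String configuredArgs) {
    String[] args = configuredArgs.split(ARG_DELIMITER);
    if (args.length == 1) {
      // Only one command configured: source and target nodes run the same script.
      return args[0];
    }
    if (args.length > 2) {
      throw new IllegalArgumentException(
          "Expecting at most two commands, got " + Arrays.asList(args));
    }
    // First command for the node becoming active, second for the one becoming standby.
    return state == State.ACTIVE ? args[0] : args[1];
  }

  public static void main(String[] unused) {
    System.out.println(select(State.ACTIVE, "fence-active.sh,fence-standby.sh"));
    System.out.println(select(State.STANDBY, "fence-active.sh,fence-standby.sh"));
  }
}
```

With a single command configured, both nodes run the same script, which keeps existing shell(...) fencing configurations working unchanged.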
@@ -190,9 +219,24 @@ private void setConfAsEnvVars(Map env) { */ private void addTargetInfoAsEnvVars(HAServiceTarget target, Map environment) { + String prefix; + HAServiceProtocol.HAServiceState targetState = + target.getTransitionTargetHAStatus(); + if (targetState == null || + HAServiceProtocol.HAServiceState.ACTIVE.equals(targetState)) { + // null is assumed to be same as ACTIVE, this is to be compatible + // with existing tests/use cases where target state is not specified + // but assuming it's active. + prefix = TARGET_PREFIX; + } else if (HAServiceProtocol.HAServiceState.STANDBY.equals(targetState)) { + prefix = SOURCE_PREFIX; + } else { + throw new IllegalArgumentException( + "Unexpected HA service state:" + targetState); + } for (Map.Entry e : target.getFencingParameters().entrySet()) { - String key = TARGET_PREFIX + e.getKey(); + String key = prefix + e.getKey(); key = key.replace('.', '_'); environment.put(key, e.getValue()); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java index e0c2f4d9e7b77..a13b592e5cb15 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java @@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configured; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.jcraft.jsch.ChannelExec; import com.jcraft.jsch.JSch; import com.jcraft.jsch.JSchException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java index 86dd91ee1423d..1e5b27a6fea64 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService; import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB; import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.security.AccessControlException; @@ -50,7 +50,7 @@ public class ZKFCRpcServer implements ZKFCProtocol { this.zkfc = zkfc; RPC.setProtocolEngine(conf, ZKFCProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ZKFCProtocolServerSideTranslatorPB translator = new ZKFCProtocolServerSideTranslatorPB(this); BlockingService service = ZKFCProtocolService diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index 943d53dd3a823..10459404a247a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -31,11 +31,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; +import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.util.ZKUtil; import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.ha.HealthMonitor.State; @@ -50,9 +53,9 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.zookeeper.data.ACL; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -318,9 +321,10 @@ private void initHM() { healthMonitor.addServiceStateCallback(new ServiceStateCallBacks()); healthMonitor.start(); } - + protected void initRPC() throws IOException { InetSocketAddress bindAddr = getRpcAddressToBindTo(); + LOG.info("ZKFC RpcServer binding to {}", bindAddr); rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider()); } @@ -342,8 +346,19 @@ private void initZK() throws HadoopIllegalArgumentException, IOException, zkAcls = Ids.CREATOR_ALL_ACL; } - // Parse authentication from configuration. - List zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY); + // Parse authentication from configuration. Exclude any Credential providers + // using the hdfs scheme to avoid a circular dependency. As HDFS is likely + // not started when ZKFC is started, we cannot read the credentials from it. + Configuration c = conf; + try { + c = ProviderUtils.excludeIncompatibleCredentialProviders( + conf, FileSystem.getFileSystemClass("hdfs", conf)); + } catch (UnsupportedFileSystemException e) { + // Should not happen in a real cluster, as the hdfs FS will always be + // present. Inside tests, the hdfs filesystem will not be present + LOG.debug("No filesystem found for the hdfs scheme", e); + } + List zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY); // Sanity check configuration. 
Preconditions.checkArgument(zkQuorum != null, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java index e53820cd13107..2cbfd0d0ec030 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java @@ -38,7 +38,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; @@ -67,7 +67,7 @@ public class HAServiceProtocolClientSideTranslatorPB implements public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(HAServiceProtocolPB.class, RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf); } @@ -76,7 +76,7 @@ public HAServiceProtocolClientSideTranslatorPB( InetSocketAddress addr, Configuration conf, SocketFactory socketFactory, int timeout) throws IOException { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(HAServiceProtocolPB.class, RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java index 7001d93995f0f..3777207c7e45c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto; import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.AccessControlException; @@ -48,7 +48,7 @@ public ZKFCProtocolClientSideTranslatorPB( InetSocketAddress addr, Configuration conf, SocketFactory socketFactory, int timeout) throws IOException { RPC.setProtocolEngine(conf, ZKFCProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(ZKFCProtocolPB.class, RPC.getProtocolVersion(ZKFCProtocolPB.class), addr, UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout); diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 3fd74f0e89a27..cdc2a74133af2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -50,9 +50,9 @@ import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.servlet.ServletContainer; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -148,7 +148,7 @@ public final class HttpServer2 implements FilterContainer { // idle timeout in milliseconds public static final String HTTP_IDLE_TIMEOUT_MS_KEY = "hadoop.http.idle_timeout.ms"; - public static final int HTTP_IDLE_TIMEOUT_MS_DEFAULT = 10000; + public static final int HTTP_IDLE_TIMEOUT_MS_DEFAULT = 60000; public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir"; public static final String FILTER_INITIALIZER_PROPERTY @@ -1346,7 +1346,11 @@ private void bindForPortRange(ServerConnector listener, int startPort) try { bindListener(listener); return; - } catch (BindException ex) { + } catch (IOException ex) { + if (!(ex instanceof BindException) + && !(ex.getCause() instanceof BindException)) { + throw ex; + } // Ignore exception. Move to next port. ioException = ex; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java index 44e0bdce5edd6..8f8c55d5e4654 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java @@ -29,7 +29,7 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Abstract base class for MapWritable and SortedMapWritable diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java index 4c2fa67f8f24c..e21dc2f632317 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** A reusable {@link DataOutput} implementation that writes to an in-memory * buffer. 
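Reviewer note: the HttpServer2 change above widens bindForPortRange so that an IOException whose cause is a BindException is treated the same as a direct BindException (Jetty can wrap the bind failure), while any other IOException still fails fast instead of silently trying the next port. A small, hypothetical illustration of that pattern using a plain ServerSocket, not the HttpServer2 code itself:

```java
import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;

/** Sketch: try each port in a range, skipping only bind failures (direct or wrapped). */
public final class PortRangeBinder {

  static ServerSocket bindFirstFree(int startPort, int endPort) throws IOException {
    IOException lastBindFailure = null;
    for (int port = startPort; port <= endPort; port++) {
      try {
        return new ServerSocket(port);
      } catch (IOException ex) {
        if (!(ex instanceof BindException)
            && !(ex.getCause() instanceof BindException)) {
          throw ex; // not a bind problem: fail fast rather than trying more ports
        }
        lastBindFailure = ex; // port in use: remember and move on
      }
    }
    throw lastBindFailure != null
        ? lastBindFailure
        : new BindException("No free port in [" + startPort + ", " + endPort + "]");
  }

  public static void main(String[] args) throws IOException {
    try (ServerSocket socket = bindFirstFree(8080, 8090)) {
      System.out.println("Bound to port " + socket.getLocalPort());
    }
  }
}
```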
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java index bbedf2a2dc371..6a162c3ff2087 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io; -import com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; import org.apache.commons.lang3.builder.HashCodeBuilder; import java.nio.ByteBuffer; @@ -96,6 +96,7 @@ public synchronized ByteBuffer getBuffer(boolean direct, int length) { ByteBuffer.allocate(length); } tree.remove(entry.getKey()); + entry.getValue().clear(); return entry.getValue(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java index 5af6602b87886..1ef2119b688fd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java @@ -26,7 +26,7 @@ import org.slf4j.LoggerFactory; import sun.misc.Unsafe; -import com.google.common.primitives.UnsignedBytes; +import org.apache.hadoop.thirdparty.com.google.common.primitives.UnsignedBytes; /** * Utility code to do optimized byte-array comparison. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java index 804d365450692..34ae2c60d75ae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java @@ -29,8 +29,8 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java index 9d3c3c1ceeaa7..2f1eecd7a6320 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java @@ -32,7 +32,7 @@ import org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class provides secure APIs for opening and creating files on the local diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java index fec0a4ac81f2f..3f4649f04dc9a 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java @@ -24,7 +24,7 @@ import java.rmi.server.UID; import java.security.MessageDigest; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Options; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.Options.CreateOpts; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java index 99590eda679af..7fd5633daa698 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java @@ -236,7 +236,7 @@ public Decompressor createDecompressor() { */ @Override public String getDefaultExtension() { - return ".bz2"; + return CodecConstants.BZIP2_CODEC_EXTENSION; } private static class BZip2CompressionOutputStream extends diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java new file mode 100644 index 0000000000000..96410a18ebcb5 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecConstants.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.io.compress; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Codec related constants. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class CodecConstants { + + private CodecConstants() { + } + /** + * Default extension for {@link org.apache.hadoop.io.compress.DefaultCodec}. + */ + public static final String DEFAULT_CODEC_EXTENSION = ".deflate"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.BZip2Codec}. + */ + public static final String BZIP2_CODEC_EXTENSION = ".bz2"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.GzipCodec}. + */ + public static final String GZIP_CODEC_EXTENSION = ".gz"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.Lz4Codec}. + */ + public static final String LZ4_CODEC_EXTENSION = ".lz4"; + + /** + * Default extension for + * {@link org.apache.hadoop.io.compress.PassthroughCodec}. 
+ */ + public static final String PASSTHROUGH_CODEC_EXTENSION = ".passthrough"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.SnappyCodec}. + */ + public static final String SNAPPY_CODEC_EXTENSION = ".snappy"; + + /** + * Default extension for {@link org.apache.hadoop.io.compress.ZStandardCodec}. + */ + public static final String ZSTANDARD_CODEC_EXTENSION = ".zst"; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java index f103aad4e8596..2ac2ca65173f0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java @@ -28,9 +28,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java index 2dfa30bf76ec4..55bb132e9c87c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java @@ -25,6 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PositionedReadable; import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSupport; + /** * A compression input stream. * @@ -34,7 +38,8 @@ */ @InterfaceAudience.Public @InterfaceStability.Evolving -public abstract class CompressionInputStream extends InputStream implements Seekable { +public abstract class CompressionInputStream extends InputStream + implements Seekable, IOStatisticsSource { /** * The input stream to be compressed. */ @@ -68,7 +73,16 @@ public void close() throws IOException { } } } - + + /** + * Return any IOStatistics provided by the underlying stream. + * @return IO stats from the inner stream. + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(in); + } + /** * Read bytes from the stream. * Made abstract to prevent leakage to underlying stream. 
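Reviewer note: with CompressionInputStream now implementing IOStatisticsSource, a caller can ask the codec stream for the statistics of the stream it wraps. A hedged usage sketch; the path handling and codec lookup are illustrative, and getIOStatistics() may return null when the wrapped stream publishes nothing:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionInputStream;

public final class CodecStatsProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path input = new Path(args[0]);                 // e.g. some file ending in ".gz"
    FileSystem fs = input.getFileSystem(conf);

    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(input);
    if (codec == null) {
      throw new IllegalArgumentException("No codec registered for " + input);
    }

    try (CompressionInputStream in = codec.createInputStream(fs.open(input))) {
      byte[] buf = new byte[8192];
      while (in.read(buf) != -1) {
        // drain; real code would process the decompressed bytes
      }
      // New in this patch: the codec stream passes through the inner stream's stats.
      IOStatistics stats = in.getIOStatistics();
      System.out.println(stats == null ? "no IOStatistics available" : stats.toString());
    }
  }
}
```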
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java index 71c7f32e665e5..2a11ace81702c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java @@ -23,13 +23,17 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSupport; /** * A compression output stream. */ @InterfaceAudience.Public @InterfaceStability.Evolving -public abstract class CompressionOutputStream extends OutputStream { +public abstract class CompressionOutputStream extends OutputStream + implements IOStatisticsSource { /** * The output stream to be compressed. */ @@ -94,4 +98,12 @@ public void flush() throws IOException { */ public abstract void resetState() throws IOException; + /** + * Return any IOStatistics provided by the underlying stream. + * @return IO stats from the inner stream. + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(out); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java index 3808003de291d..e9558fab87325 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java @@ -92,7 +92,7 @@ public interface Decompressor { * {@link #needsInput()} should be called in order to determine if more * input data is required. * - * @param b Buffer for the compressed data + * @param b Buffer for the uncompressed data * @param off Start offset of the data * @param len Size of the buffer * @return The actual number of bytes of uncompressed data. 
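Reviewer note: the Decompressor javadoc fix above clarifies that decompress() fills the caller's buffer with uncompressed data. Most code never drives Decompressor directly; the stream wrappers touched by this patch do it internally. A round-trip sketch using DefaultCodec, which falls back to pure-Java zlib and so runs without native libraries; the class name and buffer sizes are illustrative:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;

public final class DeflateRoundTrip {
  public static void main(String[] args) throws Exception {
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(new Configuration());

    byte[] original = "hello hadoop codecs".getBytes(StandardCharsets.UTF_8);

    // Compress: CompressionOutputStream wraps the raw sink.
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    try (CompressionOutputStream out = codec.createOutputStream(sink)) {
      out.write(original);
      out.finish();
    }

    // Decompress: internally the stream drives Decompressor.decompress(), which
    // (per the corrected javadoc) fills the buffer with uncompressed bytes.
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (CompressionInputStream in =
             codec.createInputStream(new ByteArrayInputStream(sink.toByteArray()))) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        restored.write(buf, 0, n);
      }
    }

    System.out.println(new String(restored.toByteArray(), StandardCharsets.UTF_8));
    System.out.println("default extension: " + codec.getDefaultExtension()); // ".deflate"
  }
}
```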
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java index 756ccf3c8ed1f..d66b6f0d0237d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.io.InputStream; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java index 33f39ef9297fb..d2ffb22eaafb3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java @@ -114,7 +114,7 @@ public DirectDecompressor createDirectDecompressor() { @Override public String getDefaultExtension() { - return ".deflate"; + return CodecConstants.DEFAULT_CODEC_EXTENSION; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java index 9bd861da9e890..1535e8c3d386e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java @@ -206,7 +206,7 @@ public DirectDecompressor createDirectDecompressor() { @Override public String getDefaultExtension() { - return ".gz"; + return CodecConstants.GZIP_CODEC_EXTENSION; } static final class GzipZlibCompressor extends ZlibCompressor { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java index 45b5e9cdabd28..8bfb7fe95c4e2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java @@ -27,17 +27,12 @@ import org.apache.hadoop.io.compress.lz4.Lz4Compressor; import org.apache.hadoop.io.compress.lz4.Lz4Decompressor; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.util.NativeCodeLoader; /** * This class creates lz4 compressors/decompressors. */ public class Lz4Codec implements Configurable, CompressionCodec { - static { - NativeCodeLoader.isNativeCodeLoaded(); - } - Configuration conf; /** @@ -60,19 +55,6 @@ public Configuration getConf() { return conf; } - /** - * Are the native lz4 libraries loaded & initialized? - * - * @return true if loaded & initialized, otherwise false - */ - public static boolean isNativeCodeLoaded() { - return NativeCodeLoader.isNativeCodeLoaded(); - } - - public static String getLibraryName() { - return Lz4Compressor.getLibraryName(); - } - /** * Create a {@link CompressionOutputStream} that will write to the given * {@link OutputStream}. 
@@ -101,9 +83,6 @@ public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { - if (!isNativeCodeLoaded()) { - throw new RuntimeException("native lz4 library not available"); - } int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT); @@ -121,10 +100,6 @@ public CompressionOutputStream createOutputStream(OutputStream out, */ @Override public Class getCompressorType() { - if (!isNativeCodeLoaded()) { - throw new RuntimeException("native lz4 library not available"); - } - return Lz4Compressor.class; } @@ -135,9 +110,6 @@ public Class getCompressorType() { */ @Override public Compressor createCompressor() { - if (!isNativeCodeLoaded()) { - throw new RuntimeException("native lz4 library not available"); - } int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT); @@ -175,10 +147,6 @@ public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { - if (!isNativeCodeLoaded()) { - throw new RuntimeException("native lz4 library not available"); - } - return new BlockDecompressorStream(in, decompressor, conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT)); @@ -191,10 +159,6 @@ public CompressionInputStream createInputStream(InputStream in, */ @Override public Class getDecompressorType() { - if (!isNativeCodeLoaded()) { - throw new RuntimeException("native lz4 library not available"); - } - return Lz4Decompressor.class; } @@ -205,9 +169,6 @@ public Class getDecompressorType() { */ @Override public Decompressor createDecompressor() { - if (!isNativeCodeLoaded()) { - throw new RuntimeException("native lz4 library not available"); - } int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT); @@ -221,6 +182,6 @@ public Decompressor createDecompressor() { */ @Override public String getDefaultExtension() { - return ".lz4"; + return CodecConstants.LZ4_CODEC_EXTENSION; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java index a3f0bffeebc0f..074762c0e8f7a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/PassthroughCodec.java @@ -77,7 +77,8 @@ public class PassthroughCodec * This default extension is here so that if no extension has been defined, * some value is still returned: {@value}.. 
*/ - public static final String DEFAULT_EXTENSION = ".passthrough"; + public static final String DEFAULT_EXTENSION = + CodecConstants.PASSTHROUGH_CODEC_EXTENSION; private Configuration conf; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java index cd0c7880376bf..77cf36a339b34 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java @@ -28,7 +28,6 @@ import org.apache.hadoop.io.compress.snappy.SnappyDecompressor; import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.util.NativeCodeLoader; /** * This class creates snappy compressors/decompressors. @@ -56,37 +55,6 @@ public Configuration getConf() { return conf; } - /** - * Are the native snappy libraries loaded & initialized? - */ - public static void checkNativeCodeLoaded() { - if (!NativeCodeLoader.buildSupportsSnappy()) { - throw new RuntimeException("native snappy library not available: " + - "this version of libhadoop was built without " + - "snappy support."); - } - if (!NativeCodeLoader.isNativeCodeLoaded()) { - throw new RuntimeException("Failed to load libhadoop."); - } - if (!SnappyCompressor.isNativeCodeLoaded()) { - throw new RuntimeException("native snappy library not available: " + - "SnappyCompressor has not been loaded."); - } - if (!SnappyDecompressor.isNativeCodeLoaded()) { - throw new RuntimeException("native snappy library not available: " + - "SnappyDecompressor has not been loaded."); - } - } - - public static boolean isNativeCodeLoaded() { - return SnappyCompressor.isNativeCodeLoaded() && - SnappyDecompressor.isNativeCodeLoaded(); - } - - public static String getLibraryName() { - return SnappyCompressor.getLibraryName(); - } - /** * Create a {@link CompressionOutputStream} that will write to the given * {@link OutputStream}. 
@@ -115,7 +83,6 @@ public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out, Compressor compressor) throws IOException { - checkNativeCodeLoaded(); int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT); @@ -133,7 +100,6 @@ public CompressionOutputStream createOutputStream(OutputStream out, */ @Override public Class getCompressorType() { - checkNativeCodeLoaded(); return SnappyCompressor.class; } @@ -144,7 +110,6 @@ public Class getCompressorType() { */ @Override public Compressor createCompressor() { - checkNativeCodeLoaded(); int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT); @@ -179,7 +144,6 @@ public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in, Decompressor decompressor) throws IOException { - checkNativeCodeLoaded(); return new BlockDecompressorStream(in, decompressor, conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT)); @@ -192,7 +156,6 @@ public CompressionInputStream createInputStream(InputStream in, */ @Override public Class getDecompressorType() { - checkNativeCodeLoaded(); return SnappyDecompressor.class; } @@ -203,7 +166,6 @@ public Class getDecompressorType() { */ @Override public Decompressor createDecompressor() { - checkNativeCodeLoaded(); int bufferSize = conf.getInt( CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT); @@ -215,7 +177,7 @@ public Decompressor createDecompressor() { */ @Override public DirectDecompressor createDirectDecompressor() { - return isNativeCodeLoaded() ? 
new SnappyDirectDecompressor() : null; + return new SnappyDirectDecompressor(); } /** @@ -225,6 +187,6 @@ public DirectDecompressor createDirectDecompressor() { */ @Override public String getDefaultExtension() { - return ".snappy"; + return CodecConstants.SNAPPY_CODEC_EXTENSION; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java index c56bbba3b5959..a7afebc0c49ae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java @@ -230,7 +230,7 @@ public Decompressor createDecompressor() { */ @Override public String getDefaultExtension() { - return ".zst"; + return CodecConstants.ZSTANDARD_CODEC_EXTENSION; } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java index 3792c365b4d98..607a802678bbd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java @@ -22,9 +22,11 @@ import java.nio.Buffer; import java.nio.ByteBuffer; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4Compressor; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.Compressor; -import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,22 +50,7 @@ public class Lz4Compressor implements Compressor { private long bytesRead = 0L; private long bytesWritten = 0L; - private final boolean useLz4HC; - - static { - if (NativeCodeLoader.isNativeCodeLoaded()) { - // Initialize the native library - try { - initIDs(); - } catch (Throwable t) { - // Ignore failure to load/initialize lz4 - LOG.warn(t.toString()); - } - } else { - LOG.error("Cannot load " + Lz4Compressor.class.getName() + - " without native hadoop library!"); - } - } + private final LZ4Compressor lz4Compressor; /** * Creates a new compressor. @@ -73,9 +60,21 @@ public class Lz4Compressor implements Compressor { * which trades CPU for compression ratio. */ public Lz4Compressor(int directBufferSize, boolean useLz4HC) { - this.useLz4HC = useLz4HC; this.directBufferSize = directBufferSize; + try { + LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); + if (useLz4HC) { + lz4Compressor = lz4Factory.highCompressor(); + } else { + lz4Compressor = lz4Factory.fastCompressor(); + } + } catch (AssertionError t) { + throw new RuntimeException("lz4-java library is not available: " + + "Lz4Compressor has not been loaded. You need to add " + + "lz4-java.jar to your CLASSPATH. " + t, t); + } + uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); compressedDirectBuf.position(directBufferSize); @@ -236,7 +235,7 @@ public synchronized int compress(byte[] b, int off, int len) } // Compress data - n = useLz4HC ? 
compressBytesDirectHC() : compressBytesDirect(); + n = compressDirectBuf(); compressedDirectBuf.limit(n); uncompressedDirectBuf.clear(); // lz4 consumes all buffer input @@ -302,11 +301,20 @@ public synchronized long getBytesWritten() { public synchronized void end() { } - private native static void initIDs(); - - private native int compressBytesDirect(); - - private native int compressBytesDirectHC(); - - public native static String getLibraryName(); + private int compressDirectBuf() { + if (uncompressedDirectBufLen == 0) { + return 0; + } else { + // Set the position and limit of `uncompressedDirectBuf` for reading + uncompressedDirectBuf.limit(uncompressedDirectBufLen).position(0); + compressedDirectBuf.clear(); + lz4Compressor.compress((ByteBuffer) uncompressedDirectBuf, + (ByteBuffer) compressedDirectBuf); + uncompressedDirectBufLen = 0; + uncompressedDirectBuf.limit(directBufferSize).position(0); + int size = compressedDirectBuf.position(); + compressedDirectBuf.position(0); + return size; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java index f26ae8481c3f9..2b62ef78b2859 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java @@ -22,8 +22,10 @@ import java.nio.Buffer; import java.nio.ByteBuffer; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4SafeDecompressor; + import org.apache.hadoop.io.compress.Decompressor; -import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,20 +46,7 @@ public class Lz4Decompressor implements Decompressor { private int userBufOff = 0, userBufLen = 0; private boolean finished; - static { - if (NativeCodeLoader.isNativeCodeLoaded()) { - // Initialize the native library - try { - initIDs(); - } catch (Throwable t) { - // Ignore failure to load/initialize lz4 - LOG.warn(t.toString()); - } - } else { - LOG.error("Cannot load " + Lz4Compressor.class.getName() + - " without native hadoop library!"); - } - } + private LZ4SafeDecompressor lz4Decompressor; /** * Creates a new compressor. @@ -67,6 +56,15 @@ public class Lz4Decompressor implements Decompressor { public Lz4Decompressor(int directBufferSize) { this.directBufferSize = directBufferSize; + try { + LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); + lz4Decompressor = lz4Factory.safeDecompressor(); + } catch (AssertionError t) { + throw new RuntimeException("lz4-java library is not available: " + + "Lz4Decompressor has not been loaded. You need to add " + + "lz4-java.jar to your CLASSPATH. " + t, t); + } + compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize); uncompressedDirectBuf.position(directBufferSize); @@ -200,7 +198,7 @@ public synchronized boolean finished() { * @param b Buffer for the compressed data * @param off Start offset of the data * @param len Size of the buffer - * @return The actual number of bytes of compressed data. + * @return The actual number of bytes of uncompressed data. 
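The rewritten Lz4Compressor/Lz4Decompressor above delegate to lz4-java's ByteBuffer API. A minimal standalone round trip over that same API, mirroring what compressDirectBuf() and decompressDirectBuf() now do with direct buffers (the class name and payload are invented for illustration; lz4-java must be on the classpath):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    import net.jpountz.lz4.LZ4Compressor;
    import net.jpountz.lz4.LZ4Factory;
    import net.jpountz.lz4.LZ4SafeDecompressor;

    public class Lz4JavaRoundTrip {
      public static void main(String[] args) {
        LZ4Factory factory = LZ4Factory.fastestInstance();
        LZ4Compressor compressor = factory.fastCompressor();
        LZ4SafeDecompressor decompressor = factory.safeDecompressor();

        byte[] input = "lz4-java replaces the JNI path".getBytes(StandardCharsets.UTF_8);

        // Compress: lz4-java updates both buffers' positions, as in compressDirectBuf().
        ByteBuffer src = ByteBuffer.allocateDirect(input.length);
        src.put(input).flip();
        ByteBuffer compressed =
            ByteBuffer.allocateDirect(compressor.maxCompressedLength(input.length));
        compressor.compress(src, compressed);
        compressed.flip();

        // Decompress back into a direct buffer, as in decompressDirectBuf().
        ByteBuffer restored = ByteBuffer.allocateDirect(input.length);
        decompressor.decompress(compressed, restored);
        restored.flip();

        byte[] out = new byte[restored.remaining()];
        restored.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8));
      }
    }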
* @throws IOException */ @Override @@ -228,7 +226,7 @@ public synchronized int decompress(byte[] b, int off, int len) uncompressedDirectBuf.limit(directBufferSize); // Decompress data - n = decompressBytesDirect(); + n = decompressDirectBuf(); uncompressedDirectBuf.limit(n); if (userBufLen <= 0) { @@ -272,7 +270,18 @@ public synchronized void end() { // do nothing } - private native static void initIDs(); - - private native int decompressBytesDirect(); + private int decompressDirectBuf() { + if (compressedDirectBufLen == 0) { + return 0; + } else { + compressedDirectBuf.limit(compressedDirectBufLen).position(0); + lz4Decompressor.decompress((ByteBuffer) compressedDirectBuf, + (ByteBuffer) uncompressedDirectBuf); + compressedDirectBufLen = 0; + compressedDirectBuf.clear(); + int size = uncompressedDirectBuf.position(); + uncompressedDirectBuf.position(0); + return size; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java index 3d386800e4d87..2d514705d1e42 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java @@ -24,9 +24,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.Compressor; -import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.xerial.snappy.Snappy; /** * A {@link Compressor} based on the snappy compression algorithm. @@ -48,24 +48,6 @@ public class SnappyCompressor implements Compressor { private long bytesRead = 0L; private long bytesWritten = 0L; - private static boolean nativeSnappyLoaded = false; - - static { - if (NativeCodeLoader.isNativeCodeLoaded() && - NativeCodeLoader.buildSupportsSnappy()) { - try { - initIDs(); - nativeSnappyLoaded = true; - } catch (Throwable t) { - LOG.error("failed to load SnappyCompressor", t); - } - } - } - - public static boolean isNativeCodeLoaded() { - return nativeSnappyLoaded; - } - /** * Creates a new compressor. 
* @@ -225,7 +207,7 @@ public int compress(byte[] b, int off, int len) } // Compress data - n = compressBytesDirect(); + n = compressDirectBuf(); compressedDirectBuf.limit(n); uncompressedDirectBuf.clear(); // snappy consumes all buffer input @@ -291,9 +273,16 @@ public long getBytesWritten() { public void end() { } - private native static void initIDs(); - - private native int compressBytesDirect(); - - public native static String getLibraryName(); + private int compressDirectBuf() throws IOException { + if (uncompressedDirectBufLen == 0) { + return 0; + } else { + // Set the position and limit of `uncompressedDirectBuf` for reading + uncompressedDirectBuf.limit(uncompressedDirectBufLen).position(0); + int size = Snappy.compress((ByteBuffer) uncompressedDirectBuf, + (ByteBuffer) compressedDirectBuf); + uncompressedDirectBufLen = 0; + return size; + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java index f31b76c347c5c..d3775e286e895 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java @@ -24,9 +24,9 @@ import org.apache.hadoop.io.compress.Decompressor; import org.apache.hadoop.io.compress.DirectDecompressor; -import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.xerial.snappy.Snappy; /** * A {@link Decompressor} based on the snappy compression algorithm. @@ -45,24 +45,6 @@ public class SnappyDecompressor implements Decompressor { private int userBufOff = 0, userBufLen = 0; private boolean finished; - private static boolean nativeSnappyLoaded = false; - - static { - if (NativeCodeLoader.isNativeCodeLoaded() && - NativeCodeLoader.buildSupportsSnappy()) { - try { - initIDs(); - nativeSnappyLoaded = true; - } catch (Throwable t) { - LOG.error("failed to load SnappyDecompressor", t); - } - } - } - - public static boolean isNativeCodeLoaded() { - return nativeSnappyLoaded; - } - /** * Creates a new compressor. * @@ -201,7 +183,7 @@ public boolean finished() { * {@link #needsInput()} should be called in order to determine if more * input data is required. * - * @param b Buffer for the compressed data + * @param b Buffer for the uncompressed data * @param off Start offset of the data * @param len Size of the buffer * @return The actual number of bytes of compressed data. 
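SnappyCompressor now calls into snappy-java rather than the removed JNI stubs, and SnappyDecompressor follows below. A minimal sketch of the same Snappy.compress/Snappy.uncompress direct-buffer path, with invented names, assuming snappy-java is available:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    import org.xerial.snappy.Snappy;

    public class SnappyJavaRoundTrip {
      public static void main(String[] args) throws IOException {
        byte[] input = "snappy-java replaces the libhadoop JNI path"
            .getBytes(StandardCharsets.UTF_8);

        // Snappy.compress(ByteBuffer, ByteBuffer) requires direct buffers,
        // which is what the new compressDirectBuf() relies on.
        ByteBuffer uncompressed = ByteBuffer.allocateDirect(input.length);
        uncompressed.put(input).flip();
        ByteBuffer compressed =
            ByteBuffer.allocateDirect(Snappy.maxCompressedLength(input.length));
        int compressedSize = Snappy.compress(uncompressed, compressed);

        // Snappy.uncompress(ByteBuffer, ByteBuffer) mirrors decompressDirectBuf().
        ByteBuffer restored = ByteBuffer.allocateDirect(input.length);
        int restoredSize = Snappy.uncompress(compressed, restored);

        System.out.println("compressed=" + compressedSize + " restored=" + restoredSize);
      }
    }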
@@ -232,7 +214,7 @@ public int decompress(byte[] b, int off, int len) uncompressedDirectBuf.limit(directBufferSize); // Decompress data - n = decompressBytesDirect(); + n = decompressDirectBuf(); uncompressedDirectBuf.limit(n); if (userBufLen <= 0) { @@ -276,10 +258,20 @@ public void end() { // do nothing } - private native static void initIDs(); + private int decompressDirectBuf() throws IOException { + if (compressedDirectBufLen == 0) { + return 0; + } else { + // Set the position and limit of `compressedDirectBuf` for reading + compressedDirectBuf.limit(compressedDirectBufLen).position(0); + int size = Snappy.uncompress((ByteBuffer) compressedDirectBuf, + (ByteBuffer) uncompressedDirectBuf); + compressedDirectBufLen = 0; + compressedDirectBuf.clear(); + return size; + } + } - private native int decompressBytesDirect(); - int decompressDirect(ByteBuffer src, ByteBuffer dst) throws IOException { assert (this instanceof SnappyDirectDecompressor); @@ -298,7 +290,7 @@ int decompressDirect(ByteBuffer src, ByteBuffer dst) throws IOException { directBufferSize = dst.remaining(); int n = 0; try { - n = decompressBytesDirect(); + n = decompressDirectBuf(); presliced.position(presliced.position() + n); // SNAPPY always consumes the whole buffer or throws an exception src.position(src.limit()); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java index 07afbab7246b7..7b3099819497f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java @@ -26,7 +26,7 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy; import org.apache.hadoop.util.NativeCodeLoader; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java index 7445502c989d8..8b1b6db086021 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java @@ -18,7 +18,7 @@ package org.apache.hadoop.io.compress.zstd; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -159,7 +159,7 @@ public boolean needsInput() { } // have we consumed all input - if (keepUncompressedBuf && uncompressedDirectBufLen > 0) { + if (keepUncompressedBuf && uncompressedDirectBufLen - uncompressedDirectBufOff > 0) { return false; } @@ -223,7 +223,7 @@ public int compress(byte[] b, int off, int len) throws IOException { compressedDirectBuf.limit(n); // Check if we have consumed all input buffer - if (uncompressedDirectBufLen <= 0) { + if (uncompressedDirectBufLen - uncompressedDirectBufOff <= 0) { // consumed all input buffer 
keepUncompressedBuf = false; uncompressedDirectBuf.clear(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java index daf91e22ecc91..359e07e27fd2a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.erasurecode; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java index 5ba6e9c0dd1ed..c871ce3067e00 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.erasurecode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.erasurecode.codec.ErasureCodec; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java index c4347e0c1afab..6eee025a2339d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java @@ -24,7 +24,7 @@ import java.io.OutputStream; import java.util.ArrayList; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 160b8e029e56b..06eb7a68aba72 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory; import sun.misc.Unsafe; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * JNI wrappers for various native IO-related calls not available in Java. 
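The import churn in the surrounding files is the move from unshaded Guava to the hadoop-thirdparty relocation. Purely as an illustration of the convention (this toy class is not part of the patch), code that used com.google.common classes now imports them from the shaded prefix:

    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

    public class ShadedGuavaExample {
      private final int bufferSize;

      public ShadedGuavaExample(int bufferSize) {
        // Same Guava API, relocated under org.apache.hadoop.thirdparty so that
        // Hadoop's Guava no longer collides with an application's own Guava.
        Preconditions.checkArgument(bufferSize > 0, "bufferSize must be positive");
        this.bufferSize = bufferSize;
      }

      @VisibleForTesting
      int getBufferSize() {
        return bufferSize;
      }
    }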
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java index 69e1233e16b07..d4921f7f57993 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.io.retry; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ipc.Client; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java index 022b78507fcee..7ccd6deb7f913 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.retry; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** The call return from a method invocation. */ class CallReturn { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java index 64824a15cd89c..8f299aa1de282 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.retry; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo; import org.apache.hadoop.io.retry.RetryPolicy.RetryAction; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java index fcbcc868cf6dd..e6ccd7671b631 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java @@ -43,7 +43,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.ietf.jgss.GSSException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java index 81b7d34d0d1e0..661d7168b058e 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java @@ -32,7 +32,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -208,6 +209,19 @@ int getPriorityLevel(Schedulable e) { return scheduler.getPriorityLevel(e); } + int getPriorityLevel(UserGroupInformation user) { + if (scheduler instanceof DecayRpcScheduler) { + return ((DecayRpcScheduler)scheduler).getPriorityLevel(user); + } + return 0; + } + + void setPriorityLevel(UserGroupInformation user, int priority) { + if (scheduler instanceof DecayRpcScheduler) { + ((DecayRpcScheduler)scheduler).setPriorityLevel(user, priority); + } + } + void setClientBackoffEnabled(boolean value) { clientBackOffEnabled = value; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 688eed647c209..4a0b5aec40481 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -18,9 +18,10 @@ package org.apache.hadoop.ipc; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; @@ -649,6 +650,7 @@ private synchronized boolean updateAddress() throws IOException { private synchronized void setupConnection( UserGroupInformation ticket) throws IOException { + LOG.debug("Setup connection to " + server.toString()); short ioFailures = 0; short timeoutFailures = 0; while (true) { @@ -711,8 +713,16 @@ private synchronized void setupConnection( } catch (IOException ie) { if (updateAddress()) { timeoutFailures = ioFailures = 0; + try { + // HADOOP-17068: when server changed, ignore the exception. 
+ handleConnectionFailure(ioFailures++, ie); + } catch (IOException ioe) { + LOG.warn("Exception when handle ConnectionFailure: " + + ioe.getMessage()); + } + } else { + handleConnectionFailure(ioFailures++, ie); } - handleConnectionFailure(ioFailures++, ie); } } } @@ -848,7 +858,8 @@ public AuthMethod run() } } else if (UserGroupInformation.isSecurityEnabled()) { if (!fallbackAllowed) { - throw new IOException("Server asks us to fall back to SIMPLE " + + throw new AccessControlException( + "Server asks us to fall back to SIMPLE " + "auth, but this client is configured to only allow secure " + "connections."); } @@ -1277,7 +1288,7 @@ private synchronized void close() { cleanupCalls(); } } else { - // log the info + // Log the newest server information if update address. if (LOG.isDebugEnabled()) { LOG.debug("closing ipc connection to " + server + ": " + closeException.getMessage(),closeException); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java index c95dbb9facaac..cc6581618b5f1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A class defining a set of static helper methods to provide conversion between diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 3e952eb63c3ff..cdd62fed6cc3c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -25,6 +25,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.ConcurrentHashMap; @@ -37,12 +38,14 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.AtomicDoubleArray; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AtomicDoubleArray; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.metrics.DecayRpcSchedulerDetailedMetrics; +import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -52,7 +55,7 @@ import org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair; import org.apache.hadoop.metrics2.util.Metrics2Util.TopN; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -178,6 +181,7 @@ public class DecayRpcScheduler 
implements RpcScheduler, private MetricsProxy metricsProxy; private final CostProvider costProvider; + private final Map staticPriorities = new HashMap<>(); /** * This TimerTask will call decayCurrentCosts until * the scheduler has been garbage collected. @@ -485,7 +489,7 @@ private void recomputeScheduleCache() { AtomicLong value = entry.getValue().get(0); long snapshot = value.get(); - int computedLevel = computePriorityLevel(snapshot); + int computedLevel = computePriorityLevel(snapshot, id); nextCache.put(id, computedLevel); } @@ -535,7 +539,11 @@ private void addCost(Object identity, long costDelta) { * @param cost the cost for an identity * @return scheduling decision from 0 to numLevels - 1 */ - private int computePriorityLevel(long cost) { + private int computePriorityLevel(long cost, Object identity) { + Integer staticPriority = staticPriorities.get(identity); + if (staticPriority != null) { + return staticPriority.intValue(); + } long totalCallSnapshot = totalDecayedCallCost.get(); double proportion = 0; @@ -575,11 +583,20 @@ private int cachedOrComputedPriorityLevel(Object identity) { // Cache was no good, compute it List costList = callCosts.get(identity); long currentCost = costList == null ? 0 : costList.get(0).get(); - int priority = computePriorityLevel(currentCost); + int priority = computePriorityLevel(currentCost, identity); LOG.debug("compute priority for {} priority {}", identity, priority); return priority; } + private String getIdentity(Schedulable obj) { + String identity = this.identityProvider.makeIdentity(obj); + if (identity == null) { + // Identity provider did not handle this + identity = DECAYSCHEDULER_UNKNOWN_IDENTITY; + } + return identity; + } + /** * Compute the appropriate priority for a schedulable based on past requests. * @param obj the schedulable obj to query and remember @@ -588,15 +605,42 @@ private int cachedOrComputedPriorityLevel(Object identity) { @Override public int getPriorityLevel(Schedulable obj) { // First get the identity - String identity = this.identityProvider.makeIdentity(obj); - if (identity == null) { - // Identity provider did not handle this - identity = DECAYSCHEDULER_UNKNOWN_IDENTITY; - } + String identity = getIdentity(obj); + // highest priority users may have a negative priority but their + // calls will be priority 0. + return Math.max(0, cachedOrComputedPriorityLevel(identity)); + } + @VisibleForTesting + int getPriorityLevel(UserGroupInformation ugi) { + String identity = getIdentity(newSchedulable(ugi)); + // returns true priority of the user. return cachedOrComputedPriorityLevel(identity); } + @VisibleForTesting + void setPriorityLevel(UserGroupInformation ugi, int priority) { + String identity = getIdentity(newSchedulable(ugi)); + priority = Math.min(numLevels - 1, priority); + LOG.info("Setting priority for user:" + identity + "=" + priority); + staticPriorities.put(identity, priority); + } + + // dummy instance to conform to identity provider api. 
+ private static Schedulable newSchedulable(UserGroupInformation ugi) { + return new Schedulable() { + @Override + public UserGroupInformation getUserGroupInformation() { + return ugi; + } + + @Override + public int getPriorityLevel() { + return 0; + } + }; + } + @Override public boolean shouldBackOff(Schedulable obj) { Boolean backOff = false; @@ -632,8 +676,8 @@ public void addResponseTime(String callName, Schedulable schedulable, addCost(user, processingCost); int priorityLevel = schedulable.getPriorityLevel(); - long queueTime = details.get(Timing.QUEUE, TimeUnit.MILLISECONDS); - long processingTime = details.get(Timing.PROCESSING, TimeUnit.MILLISECONDS); + long queueTime = details.get(Timing.QUEUE, RpcMetrics.TIMEUNIT); + long processingTime = details.get(Timing.PROCESSING, RpcMetrics.TIMEUNIT); this.decayRpcSchedulerDetailedMetrics.addQueueTime( priorityLevel, queueTime); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java index d15a71000bd54..26b1c25dd90b3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java @@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java index bb86cfc35bf4e..1e110b9011313 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java @@ -53,6 +53,23 @@ public static IOException getRemoteException(ServiceException se) { return e instanceof IOException ? (IOException) e : new IOException(se); } + /** + * Kept for backward compatible. + * Return the IOException thrown by the remote server wrapped in + * ServiceException as cause. + * @param se ServiceException that wraps IO exception thrown by the server + * @return Exception wrapped in ServiceException or + * a new IOException that wraps the unexpected ServiceException. + */ + @Deprecated + public static IOException getRemoteException( + com.google.protobuf.ServiceException se) { + Throwable e = se.getCause(); + if (e == null) { + return new IOException(se); + } + return e instanceof IOException ? (IOException) e : new IOException(se); + } /** * Map used to cache fixed strings to ByteStrings. 
Since there is no diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java index 14b356f847acf..b7b7ad4db65cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java @@ -18,9 +18,12 @@ package org.apache.hadoop.ipc; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.protobuf.*; -import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.BlockingService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ServiceException; +import com.google.protobuf.TextFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -29,6 +32,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.RPC.RpcInvoker; +import org.apache.hadoop.ipc.RPC.RpcKind; import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; @@ -52,7 +56,10 @@ /** * RPC Engine for for protobuf based RPCs. + * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x + * from hadoop-thirdparty and use ProtobufRpcEngine2. */ +@Deprecated @InterfaceStability.Evolving public class ProtobufRpcEngine implements RpcEngine { public static final Logger LOG = @@ -355,6 +362,7 @@ public static class Server extends RPC.Server { new ThreadLocal<>(); static final ThreadLocal currentCallInfo = new ThreadLocal<>(); + private static final RpcInvoker RPC_INVOKER = new ProtoBufRpcInvoker(); static class CallInfo { private final RPC.Server server; @@ -433,7 +441,15 @@ public Server(Class protocolClass, Object protocolImpl, registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, protocolImpl); } - + + @Override + protected RpcInvoker getServerRpcInvoker(RpcKind rpcKind) { + if (rpcKind == RpcKind.RPC_PROTOCOL_BUFFER) { + return RPC_INVOKER; + } + return super.getServerRpcInvoker(rpcKind); + } + /** * Protobuf invoker for {@link RpcInvoker} */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java new file mode 100644 index 0000000000000..fa1f1068bd98c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine2.java @@ -0,0 +1,598 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ipc; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.protobuf.*; +import org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.ipc.Client.ConnectionId; +import org.apache.hadoop.ipc.RPC.RpcInvoker; +import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngine2Protos.RequestHeaderProto; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.htrace.core.TraceScope; +import org.apache.htrace.core.Tracer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetSocketAddress; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * RPC Engine for for protobuf based RPCs. 
+ */ +@InterfaceStability.Evolving +public class ProtobufRpcEngine2 implements RpcEngine { + public static final Logger LOG = + LoggerFactory.getLogger(ProtobufRpcEngine2.class); + private static final ThreadLocal> + ASYNC_RETURN_MESSAGE = new ThreadLocal<>(); + + static { // Register the rpcRequest deserializer for ProtobufRpcEngine + org.apache.hadoop.ipc.Server.registerProtocolEngine( + RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class, + new Server.ProtoBufRpcInvoker()); + } + + private static final ClientCache CLIENTS = new ClientCache(); + + @Unstable + public static AsyncGet getAsyncReturnMessage() { + return ASYNC_RETURN_MESSAGE.get(); + } + + public ProtocolProxy getProxy(Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout) throws IOException { + return getProxy(protocol, clientVersion, addr, ticket, conf, factory, + rpcTimeout, null); + } + + @Override + public ProtocolProxy getProxy( + Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy) + throws IOException { + return getProxy(protocol, clientVersion, addr, ticket, conf, factory, + rpcTimeout, connectionRetryPolicy, null, null); + } + + @Override + @SuppressWarnings("unchecked") + public ProtocolProxy getProxy(Class protocol, long clientVersion, + InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, + SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, + AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) + throws IOException { + + final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory, + rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth, + alignmentContext); + return new ProtocolProxy(protocol, (T) Proxy.newProxyInstance( + protocol.getClassLoader(), new Class[]{protocol}, invoker), false); + } + + @Override + public ProtocolProxy getProtocolMetaInfoProxy( + ConnectionId connId, Configuration conf, SocketFactory factory) + throws IOException { + Class protocol = ProtocolMetaInfoPB.class; + return new ProtocolProxy(protocol, + (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(), + new Class[]{protocol}, new Invoker(protocol, connId, conf, + factory)), false); + } + + private static final class Invoker implements RpcInvocationHandler { + private final Map returnTypes = + new ConcurrentHashMap(); + private boolean isClosed = false; + private final Client.ConnectionId remoteId; + private final Client client; + private final long clientProtocolVersion; + private final String protocolName; + private AtomicBoolean fallbackToSimpleAuth; + private AlignmentContext alignmentContext; + + private Invoker(Class protocol, InetSocketAddress addr, + UserGroupInformation ticket, Configuration conf, SocketFactory factory, + int rpcTimeout, RetryPolicy connectionRetryPolicy, + AtomicBoolean fallbackToSimpleAuth, AlignmentContext alignmentContext) + throws IOException { + this(protocol, Client.ConnectionId.getConnectionId( + addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf), + conf, factory); + this.fallbackToSimpleAuth = fallbackToSimpleAuth; + this.alignmentContext = alignmentContext; + } + + /** + * This constructor takes a connectionId, instead of creating a new one. 
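For callers, pointing a protocol at the new engine is a small configuration step before the proxy is built. The sketch below is an assumption-heavy illustration: MyProtocolPB, the protocol name, and the address are placeholders, and a real client would use a protobuf-generated BlockingInterface rather than an empty interface.

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.ipc.RPC;

    public class Engine2ClientSketch {

      // Placeholder for a real *PB interface; purely illustrative.
      @ProtocolInfo(protocolName = "org.example.MyProtocol", protocolVersion = 1)
      public interface MyProtocolPB {
      }

      public static MyProtocolPB createProxy(Configuration conf) throws IOException {
        // Route this protocol through the new engine (unless the configuration
        // already names one explicitly).
        RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine2.class);
        InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
        return RPC.getProxy(MyProtocolPB.class,
            RPC.getProtocolVersion(MyProtocolPB.class), addr, conf);
      }
    }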
+ */ + private Invoker(Class protocol, Client.ConnectionId connId, + Configuration conf, SocketFactory factory) { + this.remoteId = connId; + this.client = CLIENTS.getClient(conf, factory, RpcWritable.Buffer.class); + this.protocolName = RPC.getProtocolName(protocol); + this.clientProtocolVersion = RPC + .getProtocolVersion(protocol); + } + + private RequestHeaderProto constructRpcRequestHeader(Method method) { + RequestHeaderProto.Builder builder = RequestHeaderProto + .newBuilder(); + builder.setMethodName(method.getName()); + + + // For protobuf, {@code protocol} used when creating client side proxy is + // the interface extending BlockingInterface, which has the annotations + // such as ProtocolName etc. + // + // Using Method.getDeclaringClass(), as in WritableEngine to get at + // the protocol interface will return BlockingInterface, from where + // the annotation ProtocolName and Version cannot be + // obtained. + // + // Hence we simply use the protocol class used to create the proxy. + // For PB this may limit the use of mixins on client side. + builder.setDeclaringClassProtocolName(protocolName); + builder.setClientProtocolVersion(clientProtocolVersion); + return builder.build(); + } + + /** + * This is the client side invoker of RPC method. It only throws + * ServiceException, since the invocation proxy expects only + * ServiceException to be thrown by the method in case protobuf service. + * + * ServiceException has the following causes: + *

      + *
    + * <ol> + * <li>Exceptions encountered on the client side in this method are + * set as cause in ServiceException as is.</li> + * <li>Exceptions from the server are wrapped in RemoteException and are + * set as cause in ServiceException</li> + * </ol>
    + * + * Note that the client calling protobuf RPC methods, must handle + * ServiceException by getting the cause from the ServiceException. If the + * cause is RemoteException, then unwrap it to get the exception thrown by + * the server. + */ + @Override + public Message invoke(Object proxy, final Method method, Object[] args) + throws ServiceException { + long startTime = 0; + if (LOG.isDebugEnabled()) { + startTime = Time.now(); + } + + if (args.length != 2) { // RpcController + Message + throw new ServiceException( + "Too many or few parameters for request. Method: [" + + method.getName() + "]" + ", Expected: 2, Actual: " + + args.length); + } + if (args[1] == null) { + throw new ServiceException("null param while calling Method: [" + + method.getName() + "]"); + } + + // if Tracing is on then start a new span for this rpc. + // guard it in the if statement to make sure there isn't + // any extra string manipulation. + Tracer tracer = Tracer.curThreadTracer(); + TraceScope traceScope = null; + if (tracer != null) { + traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method)); + } + + RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); + + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Call -> " + + remoteId + ": " + method.getName() + + " {" + TextFormat.shortDebugString((Message) args[1]) + "}"); + } + + + final Message theRequest = (Message) args[1]; + final RpcWritable.Buffer val; + try { + val = (RpcWritable.Buffer) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, + new RpcProtobufRequest(rpcRequestHeader, theRequest), remoteId, + fallbackToSimpleAuth, alignmentContext); + + } catch (Throwable e) { + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Exception <- " + + remoteId + ": " + method.getName() + + " {" + e + "}"); + } + if (traceScope != null) { + traceScope.addTimelineAnnotation("Call got exception: " + + e.toString()); + } + throw new ServiceException(e); + } finally { + if (traceScope != null) { + traceScope.close(); + } + } + + if (LOG.isDebugEnabled()) { + long callTime = Time.now() - startTime; + LOG.debug("Call: " + method.getName() + " took " + callTime + "ms"); + } + + if (Client.isAsynchronousMode()) { + final AsyncGet arr + = Client.getAsyncRpcResponse(); + final AsyncGet asyncGet = + new AsyncGet() { + @Override + public Message get(long timeout, TimeUnit unit) throws Exception { + return getReturnMessage(method, arr.get(timeout, unit)); + } + + @Override + public boolean isDone() { + return arr.isDone(); + } + }; + ASYNC_RETURN_MESSAGE.set(asyncGet); + return null; + } else { + return getReturnMessage(method, val); + } + } + + private Message getReturnMessage(final Method method, + final RpcWritable.Buffer buf) throws ServiceException { + Message prototype = null; + try { + prototype = getReturnProtoType(method); + } catch (Exception e) { + throw new ServiceException(e); + } + Message returnMessage; + try { + returnMessage = buf.getValue(prototype.getDefaultInstanceForType()); + + if (LOG.isTraceEnabled()) { + LOG.trace(Thread.currentThread().getId() + ": Response <- " + + remoteId + ": " + method.getName() + + " {" + TextFormat.shortDebugString(returnMessage) + "}"); + } + + } catch (Throwable e) { + throw new ServiceException(e); + } + return returnMessage; + } + + @Override + public void close() throws IOException { + if (!isClosed) { + isClosed = true; + CLIENTS.stopClient(client); + } + } + + private Message getReturnProtoType(Method method) throws Exception { + if 
(returnTypes.containsKey(method.getName())) { + return returnTypes.get(method.getName()); + } + + Class returnType = method.getReturnType(); + Method newInstMethod = returnType.getMethod("getDefaultInstance"); + newInstMethod.setAccessible(true); + Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null); + returnTypes.put(method.getName(), prototype); + return prototype; + } + + @Override //RpcInvocationHandler + public ConnectionId getConnectionId() { + return remoteId; + } + } + + @VisibleForTesting + @InterfaceAudience.Private + @InterfaceStability.Unstable + static Client getClient(Configuration conf) { + return CLIENTS.getClient(conf, SocketFactory.getDefault(), + RpcWritable.Buffer.class); + } + + + + @Override + public RPC.Server getServer(Class protocol, Object protocolImpl, + String bindAddress, int port, int numHandlers, int numReaders, + int queueSizePerHandler, boolean verbose, Configuration conf, + SecretManager secretManager, + String portRangeConfig, AlignmentContext alignmentContext) + throws IOException { + return new Server(protocol, protocolImpl, conf, bindAddress, port, + numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, + portRangeConfig, alignmentContext); + } + + public static class Server extends RPC.Server { + + static final ThreadLocal CURRENT_CALLBACK = + new ThreadLocal<>(); + + static final ThreadLocal CURRENT_CALL_INFO = new ThreadLocal<>(); + + static class CallInfo { + private final RPC.Server server; + private final String methodName; + + CallInfo(RPC.Server server, String methodName) { + this.server = server; + this.methodName = methodName; + } + } + + static class ProtobufRpcEngineCallbackImpl + implements ProtobufRpcEngineCallback2 { + + private final RPC.Server server; + private final Call call; + private final String methodName; + private final long setupTime; + + ProtobufRpcEngineCallbackImpl() { + this.server = CURRENT_CALL_INFO.get().server; + this.call = Server.getCurCall().get(); + this.methodName = CURRENT_CALL_INFO.get().methodName; + this.setupTime = Time.now(); + } + + @Override + public void setResponse(Message message) { + long processingTime = Time.now() - setupTime; + call.setDeferredResponse(RpcWritable.wrap(message)); + server.updateDeferredMetrics(methodName, processingTime); + } + + @Override + public void error(Throwable t) { + long processingTime = Time.now() - setupTime; + String detailedMetricsName = t.getClass().getSimpleName(); + server.updateDeferredMetrics(detailedMetricsName, processingTime); + call.setDeferredError(t); + } + } + + @InterfaceStability.Unstable + public static ProtobufRpcEngineCallback2 registerForDeferredResponse() { + ProtobufRpcEngineCallback2 callback = new ProtobufRpcEngineCallbackImpl(); + CURRENT_CALLBACK.set(callback); + return callback; + } + + /** + * Construct an RPC server. 
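Deferred responses keep their previous shape, just against the Callback2 type registered above. A hedged server-side sketch (the helper class and method names are invented; the call pattern itself comes from registerForDeferredResponse()):

    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.ProtobufRpcEngineCallback2;
    import org.apache.hadoop.thirdparty.protobuf.Message;

    public final class DeferredResponseSketch {
      private DeferredResponseSketch() {
      }

      // Must be called from inside a blocking service method handled by
      // ProtobufRpcEngine2.Server: it captures the current call and defers it.
      public static ProtobufRpcEngineCallback2 defer() {
        return ProtobufRpcEngine2.Server.registerForDeferredResponse();
      }

      // Later, from any thread, complete the deferred call with a response...
      public static void complete(ProtobufRpcEngineCallback2 callback, Message response) {
        callback.setResponse(response);
      }

      // ...or fail it with the server-side exception.
      public static void fail(ProtobufRpcEngineCallback2 callback, Throwable cause) {
        callback.error(cause);
      }
    }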
+ * + * @param protocolClass the class of protocol + * @param protocolImpl the protocolImpl whose methods will be called + * @param conf the configuration to use + * @param bindAddress the address to bind on to listen for connection + * @param port the port to listen for connections on + * @param numHandlers the number of method handler threads to run + * @param verbose whether each call should be logged + * @param portRangeConfig A config parameter that can be used to restrict + * the range of ports used when port is 0 (an ephemeral port) + * @param alignmentContext provides server state info on client responses + */ + public Server(Class protocolClass, Object protocolImpl, + Configuration conf, String bindAddress, int port, int numHandlers, + int numReaders, int queueSizePerHandler, boolean verbose, + SecretManager secretManager, + String portRangeConfig, AlignmentContext alignmentContext) + throws IOException { + super(bindAddress, port, null, numHandlers, + numReaders, queueSizePerHandler, conf, + serverNameFromClass(protocolImpl.getClass()), secretManager, + portRangeConfig); + setAlignmentContext(alignmentContext); + this.verbose = verbose; + registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, + protocolImpl); + } + + /** + * Protobuf invoker for {@link RpcInvoker}. + */ + static class ProtoBufRpcInvoker implements RpcInvoker { + private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server, + String protoName, long clientVersion) throws RpcServerException { + ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion); + ProtoClassProtoImpl impl = + server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv); + if (impl == null) { // no match for Protocol AND Version + VerProtocolImpl highest = server.getHighestSupportedProtocol( + RPC.RpcKind.RPC_PROTOCOL_BUFFER, protoName); + if (highest == null) { + throw new RpcNoSuchProtocolException( + "Unknown protocol: " + protoName); + } + // protocol supported but not the version that client wants + throw new RPC.VersionMismatch(protoName, clientVersion, + highest.version); + } + return impl; + } + + @Override + /** + * This is a server side method, which is invoked over RPC. On success + * the return response has protobuf response payload. On failure, the + * exception name and the stack trace are returned in the response. + * See {@link HadoopRpcResponseProto} + * + * In this method there three types of exceptions possible and they are + * returned in response as follows. + *
      + *
    + * <ol> + * <li>Exceptions encountered in this method, which are returned + * as {@link RpcServerException}.</li> + * <li>Exceptions thrown by the service, which are wrapped in ServiceException; + * in that case this method returns the exception thrown by the + * service in the response.</li> + * <li>Other exceptions thrown by the service, which are returned + * as is.</li> + * </ol>
    + */ + public Writable call(RPC.Server server, String connectionProtocolName, + Writable writableRequest, long receiveTime) throws Exception { + RpcProtobufRequest request = (RpcProtobufRequest) writableRequest; + RequestHeaderProto rpcRequest = request.getRequestHeader(); + String methodName = rpcRequest.getMethodName(); + + /** + * RPCs for a particular interface (ie protocol) are done using a + * IPC connection that is setup using rpcProxy. + * The rpcProxy's has a declared protocol name that is + * sent form client to server at connection time. + * + * Each Rpc call also sends a protocol name + * (called declaringClassprotocolName). This name is usually the same + * as the connection protocol name except in some cases. + * For example metaProtocols such ProtocolInfoProto which get info + * about the protocol reuse the connection but need to indicate that + * the actual protocol is different (i.e. the protocol is + * ProtocolInfoProto) since they reuse the connection; in this case + * the declaringClassProtocolName field is set to the ProtocolInfoProto. + */ + + String declaringClassProtoName = + rpcRequest.getDeclaringClassProtocolName(); + long clientVersion = rpcRequest.getClientProtocolVersion(); + if (server.verbose) { + LOG.info("Call: connectionProtocolName=" + connectionProtocolName + + ", method=" + methodName); + } + + ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, + declaringClassProtoName, clientVersion); + BlockingService service = (BlockingService) protocolImpl.protocolImpl; + MethodDescriptor methodDescriptor = service.getDescriptorForType() + .findMethodByName(methodName); + if (methodDescriptor == null) { + String msg = "Unknown method " + methodName + " called on " + + connectionProtocolName + " protocol."; + LOG.warn(msg); + throw new RpcNoSuchMethodException(msg); + } + Message prototype = service.getRequestPrototype(methodDescriptor); + Message param = request.getValue(prototype); + + Message result; + Call currentCall = Server.getCurCall().get(); + try { + server.rpcDetailedMetrics.init(protocolImpl.protocolClass); + CURRENT_CALL_INFO.set(new CallInfo(server, methodName)); + currentCall.setDetailedMetricsName(methodName); + result = service.callBlockingMethod(methodDescriptor, null, param); + // Check if this needs to be a deferred response, + // by checking the ThreadLocal callback being set + if (CURRENT_CALLBACK.get() != null) { + currentCall.deferResponse(); + CURRENT_CALLBACK.set(null); + return null; + } + } catch (ServiceException e) { + Exception exception = (Exception) e.getCause(); + currentCall.setDetailedMetricsName( + exception.getClass().getSimpleName()); + throw (Exception) e.getCause(); + } catch (Exception e) { + currentCall.setDetailedMetricsName(e.getClass().getSimpleName()); + throw e; + } finally { + CURRENT_CALL_INFO.set(null); + } + return RpcWritable.wrap(result); + } + } + } + + // htrace in the ipc layer creates the span name based on toString() + // which uses the rpc header. in the normal case we want to defer decoding + // the rpc header until needed by the rpc engine. 
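On the caller's side, the contract described in the invoke() javadoc reduces to unwrapping ServiceException. A small sketch of that translation, using the ProtobufHelper overload kept in this patch (the class and method names here are mine):

    import java.io.IOException;

    import org.apache.hadoop.ipc.ProtobufHelper;
    import org.apache.hadoop.ipc.RemoteException;
    import org.apache.hadoop.thirdparty.protobuf.ServiceException;

    public final class RpcExceptionHandling {
      private RpcExceptionHandling() {
      }

      // Typical translation layer around a generated BlockingInterface call:
      // the ServiceException cause is either a local IOException or a
      // RemoteException carrying the server-side exception class.
      public static IOException toIOException(ServiceException se) {
        IOException ioe = ProtobufHelper.getRemoteException(se);
        if (ioe instanceof RemoteException) {
          return ((RemoteException) ioe).unwrapRemoteException();
        }
        return ioe;
      }
    }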
+ static class RpcProtobufRequest extends RpcWritable.Buffer { + private volatile RequestHeaderProto requestHeader; + private Message payload; + + RpcProtobufRequest() { + } + + RpcProtobufRequest(RequestHeaderProto header, Message payload) { + this.requestHeader = header; + this.payload = payload; + } + + RequestHeaderProto getRequestHeader() throws IOException { + if (getByteBuffer() != null && requestHeader == null) { + requestHeader = getValue(RequestHeaderProto.getDefaultInstance()); + } + return requestHeader; + } + + @Override + public void writeTo(ResponseBuffer out) throws IOException { + requestHeader.writeDelimitedTo(out); + if (payload != null) { + payload.writeDelimitedTo(out); + } + } + + // this is used by htrace to name the span. + @Override + public String toString() { + try { + RequestHeaderProto header = getRequestHeader(); + return header.getDeclaringClassProtocolName() + "." + + header.getMethodName(); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java index 50b70ca4bec1a..f85adb17d3f8e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback.java @@ -18,12 +18,17 @@ package org.apache.hadoop.ipc; -import org.apache.hadoop.thirdparty.protobuf.Message; +import com.google.protobuf.Message; +/** + * This engine uses Protobuf 2.5.0. Recommended to upgrade to Protobuf 3.x + * from hadoop-thirdparty and use ProtobufRpcEngineCallback2. + */ +@Deprecated public interface ProtobufRpcEngineCallback { - public void setResponse(Message message); + void setResponse(Message message); - public void error(Throwable t); + void error(Throwable t); } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java similarity index 73% rename from hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java index 8394efe477462..e8c09f56282e6 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngineCallback2.java @@ -16,18 +16,14 @@ * limitations under the License. 
*/ +package org.apache.hadoop.ipc; -#ifndef ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H -#define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H +import org.apache.hadoop.thirdparty.protobuf.Message; -#include "org_apache_hadoop.h" +public interface ProtobufRpcEngineCallback2 { -#ifdef UNIX -#include -#endif + public void setResponse(Message message); -#include -#include -#include + public void error(Throwable t); -#endif //ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java index 69d18eacd34d6..e1a1f0eb3e7a1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java @@ -29,7 +29,7 @@ import org.apache.hadoop.io.WritableFactories; import org.apache.hadoop.io.WritableFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class ProtocolSignature implements Writable { static { // register a ctor diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java index 99eb487be495c..835d8065bd391 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ipc; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import java.io.Closeable; import java.io.IOException; import java.lang.reflect.InvocationHandler; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java index 4f95863b03db6..6169fef7f6d16 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java @@ -51,6 +51,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; @@ -195,14 +196,18 @@ private RPC() {} // no public ctor private static final String ENGINE_PROP = "rpc.engine"; /** - * Set a protocol to use a non-default RpcEngine. + * Set a protocol to use a non-default RpcEngine if one + * is not specified in the configuration. 
* @param conf configuration to use * @param protocol the protocol interface * @param engine the RpcEngine impl */ public static void setProtocolEngine(Configuration conf, Class protocol, Class engine) { - conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class); + if (conf.get(ENGINE_PROP+"."+protocol.getName()) == null) { + conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, + RpcEngine.class); + } } // return the RpcEngine configured to handle a protocol @@ -976,7 +981,18 @@ void registerProtocolAndImpl(RpcKind rpcKind, Class protocolClass, " ProtocolImpl=" + protocolImpl.getClass().getName() + " protocolClass=" + protocolClass.getName()); } - } + String client = SecurityUtil.getClientPrincipal(protocolClass, getConf()); + if (client != null) { + // notify the server's rpc scheduler that the protocol user has + // highest priority. the scheduler should exempt the user from + // priority calculations. + try { + setPriorityLevel(UserGroupInformation.createRemoteUser(client), -1); + } catch (Exception ex) { + LOG.warn("Failed to set scheduling priority for " + client, ex); + } + } + } static class VerProtocolImpl { final long version; @@ -1043,7 +1059,7 @@ protected Server(String bindAddress, int port, private void initProtocolMetaInfo(Configuration conf) { RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ProtocolMetaInfoServerSideTranslatorPB xlator = new ProtocolMetaInfoServerSideTranslatorPB(this); BlockingService protocolInfoBlockingService = ProtocolInfoService @@ -1067,7 +1083,7 @@ public Server addProtocol(RpcKind rpcKind, Class protocolClass, @Override public Writable call(RPC.RpcKind rpcKind, String protocol, Writable rpcRequest, long receiveTime) throws Exception { - return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest, + return getServerRpcInvoker(rpcKind).call(this, protocol, rpcRequest, receiveTime); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java index e67e8d9cbeb92..95f1323410fe0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java @@ -20,9 +20,9 @@ import java.util.ArrayList; import java.util.Collection; -import com.google.common.base.Joiner; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java index 4bde261eab1b0..d71edded9caec 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java @@ -28,8 +28,8 @@ import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.LightWeightGSet.LinkedElement; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java index 84ecba1d34e9c..0ce78e54a43a0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java @@ -114,7 +114,7 @@ public static boolean isMethodSupported(Object rpcProxy, Class protocol, if (versionMap == null) { Configuration conf = new Configuration(); RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy, conf); GetProtocolSignatureRequestProto.Builder builder = diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java index 63812f47f2db0..5202c6b356177 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ipc; -import java.util.concurrent.TimeUnit; +import org.apache.hadoop.ipc.metrics.RpcMetrics; /** * Implement this interface to be used for RPC scheduling and backoff. @@ -62,12 +62,12 @@ default void addResponseTime(String callName, Schedulable schedulable, // this interface, a default implementation is supplied which uses the old // method. All new implementations MUST override this interface and should // NOT use the other addResponseTime method. - int queueTimeMs = (int) - details.get(ProcessingDetails.Timing.QUEUE, TimeUnit.MILLISECONDS); - int processingTimeMs = (int) - details.get(ProcessingDetails.Timing.PROCESSING, TimeUnit.MILLISECONDS); + int queueTime = (int) + details.get(ProcessingDetails.Timing.QUEUE, RpcMetrics.TIMEUNIT); + int processingTime = (int) + details.get(ProcessingDetails.Timing.PROCESSING, RpcMetrics.TIMEUNIT); addResponseTime(callName, schedulable.getPriorityLevel(), - queueTimeMs, processingTimeMs); + queueTime, processingTime); } void stop(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java index 6604bd0cc1c68..f5f0d071f39ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java @@ -42,6 +42,8 @@ static RpcWritable wrap(Object o) { return (RpcWritable)o; } else if (o instanceof Message) { return new ProtobufWrapper((Message)o); + } else if (o instanceof com.google.protobuf.Message) { + return new ProtobufWrapperLegacy((com.google.protobuf.Message) o); } else if (o instanceof Writable) { return new WritableWrapper((Writable)o); } @@ -132,6 +134,49 @@ T readFrom(ByteBuffer bb) throws IOException { } } + // adapter for Protobufs. 
+ static class ProtobufWrapperLegacy extends RpcWritable { + private com.google.protobuf.Message message; + + ProtobufWrapperLegacy(com.google.protobuf.Message message) { + this.message = message; + } + + com.google.protobuf.Message getMessage() { + return message; + } + + @Override + void writeTo(ResponseBuffer out) throws IOException { + int length = message.getSerializedSize(); + length += com.google.protobuf.CodedOutputStream. + computeUInt32SizeNoTag(length); + out.ensureCapacity(length); + message.writeDelimitedTo(out); + } + + @SuppressWarnings("unchecked") + @Override + T readFrom(ByteBuffer bb) throws IOException { + // using the parser with a byte[]-backed coded input stream is the + // most efficient way to deserialize a protobuf. it has a direct + // path to the PB ctor that doesn't create multi-layered streams + // that internally buffer. + com.google.protobuf.CodedInputStream cis = + com.google.protobuf.CodedInputStream.newInstance( + bb.array(), bb.position() + bb.arrayOffset(), bb.remaining()); + try { + cis.pushLimit(cis.readRawVarint32()); + message = message.getParserForType().parseFrom(cis); + cis.checkLastTagWas(0); + } finally { + // advance over the bytes read. + bb.position(bb.position() + cis.getTotalBytesRead()); + } + return (T)message; + } + } + /** * adapter to allow decoding of writables and protobufs from a byte buffer. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 4448164f4b137..68d4923a0aaf5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -123,7 +123,7 @@ import org.apache.htrace.core.Tracer; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; import org.apache.hadoop.thirdparty.protobuf.Message; @@ -304,7 +304,11 @@ public Class getRpcRequestWrapper( RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind)); return (val == null) ? null : val.rpcRequestWrapperClass; } - + + protected RpcInvoker getServerRpcInvoker(RPC.RpcKind rpcKind) { + return getRpcInvoker(rpcKind); + } + public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) { RpcKindMapValue val = rpcKindMap.get(rpcKind); return (val == null) ? 
null : val.rpcInvoker; @@ -639,7 +643,22 @@ public static void bind(ServerSocket socket, InetSocketAddress address, address.getPort(), e); } } - + + @VisibleForTesting + int getPriorityLevel(Schedulable e) { + return callQueue.getPriorityLevel(e); + } + + @VisibleForTesting + int getPriorityLevel(UserGroupInformation ugi) { + return callQueue.getPriorityLevel(ugi); + } + + @VisibleForTesting + void setPriorityLevel(UserGroupInformation ugi, int priority) { + callQueue.setPriorityLevel(ugi, priority); + } + /** * Returns a handle to the rpcMetrics (required in tests) * @return rpc metrics @@ -2039,7 +2058,7 @@ private void saslProcess(RpcSaslProto saslMessage) LOG.debug("SASL server successfully authenticated client: " + user); } rpcMetrics.incrAuthenticationSuccesses(); - AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user); + AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user + " from " + toString()); saslContextEstablished = true; } } catch (RpcServerException rse) { // don't re-wrap @@ -2183,7 +2202,7 @@ private void doSaslReply(Message message) throws IOException { private void doSaslReply(Exception ioe) throws IOException { setupResponse(authFailedCall, RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_UNAUTHORIZED, - null, ioe.getClass().getName(), ioe.toString()); + null, ioe.getClass().getName(), ioe.getMessage()); sendResponse(authFailedCall); } @@ -2578,8 +2597,7 @@ private void processOneRpc(ByteBuffer bb) final RpcCall call = new RpcCall(this, callId, retry); setupResponse(call, rse.getRpcStatusProto(), rse.getRpcErrorCodeProto(), null, - t.getClass().getName(), - t.getMessage() != null ? t.getMessage() : t.toString()); + t.getClass().getName(), t.getMessage()); sendResponse(call); } } @@ -2688,15 +2706,15 @@ private void processRpcRequest(RpcRequestHeaderProto header, call.setPriorityLevel(callQueue.getPriorityLevel(call)); call.markCallCoordinated(false); if(alignmentContext != null && call.rpcRequest != null && - (call.rpcRequest instanceof ProtobufRpcEngine.RpcProtobufRequest)) { + (call.rpcRequest instanceof ProtobufRpcEngine2.RpcProtobufRequest)) { // if call.rpcRequest is not RpcProtobufRequest, will skip the following // step and treat the call as uncoordinated. As currently only certain // ClientProtocol methods request made through RPC protobuf needs to be // coordinated. 
String methodName; String protoName; - ProtobufRpcEngine.RpcProtobufRequest req = - (ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest; + ProtobufRpcEngine2.RpcProtobufRequest req = + (ProtobufRpcEngine2.RpcProtobufRequest) call.rpcRequest; try { methodName = req.getRequestHeader().getMethodName(); protoName = req.getRequestHeader().getDeclaringClassProtocolName(); @@ -3710,7 +3728,7 @@ void incrUserConnections(String user) { if (count == null) { count = 1; } else { - count++; + count = count + 1; } userToConnectionsMap.put(user, count); } @@ -3722,7 +3740,7 @@ void decrUserConnections(String user) { if (count == null) { return; } else { - count--; + count = count - 1; } if (count == 0) { userToConnectionsMap.remove(user); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UserIdentityProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UserIdentityProvider.java index 763605e6a464f..91ec1a259f134 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UserIdentityProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UserIdentityProvider.java @@ -31,6 +31,6 @@ public String makeIdentity(Schedulable obj) { return null; } - return ugi.getUserName(); + return ugi.getShortUserName(); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java index 04a6c0eab1c42..ce65e9c64622d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/DecayRpcSchedulerDetailedMetrics.java @@ -27,7 +27,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is for maintaining queue (priority) level related diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java index bb4bfcfd08be5..439b87326c2ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java @@ -19,7 +19,7 @@ import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java index 79eae12314493..829d276ea7281 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java @@ -32,8 +32,8 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import 
com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Jdk14Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java index eb147ca47ee19..6bf75f9d00a7c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.log; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.HashMap; import java.util.Map; import org.apache.commons.math3.stat.descriptive.SummaryStatistics; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java index e2574f647e3a2..a9e777bcba952 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java @@ -18,13 +18,13 @@ package org.apache.hadoop.metrics2; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import java.util.StringJoiner; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * The immutable metric diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java index db8a5d9a8578e..26973f8fb9870 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java @@ -18,13 +18,13 @@ package org.apache.hadoop.metrics2; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import java.util.StringJoiner; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Immutable tag for metrics (for grouping on host/queue/username etc.) 
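The hunks above and below apply one mechanical change: Guava types are now imported from the shaded org.apache.hadoop.thirdparty.com.google.common packages provided by hadoop-thirdparty instead of the unshaded com.google.common packages. A minimal sketch of code written against the relocated packages (the class and field names below are illustrative only, not part of this patch):

    // Sketch only: demonstrates the shaded-Guava import style used throughout this patch.
    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
    import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;

    import java.util.Map;

    public class ShadedGuavaSketch {
      private final Map<String, Long> counters = Maps.newHashMap();

      @VisibleForTesting
      void increment(String name) {
        Preconditions.checkNotNull(name, "name must not be null");
        counters.merge(name, 1L, Long::sum);
      }
    }

The shaded classes are byte-for-byte relocations of the Guava originals, so only the import statements change; behavior is identical.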
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java index ca1d7f97f340c..7e3257d409ebc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/filter/AbstractPatternFilter.java @@ -20,7 +20,7 @@ import java.util.Map; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java index a4632c60c108e..28348c7ae36b8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java @@ -18,8 +18,8 @@ package org.apache.hadoop.metrics2.impl; -import com.google.common.base.Objects; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.apache.hadoop.metrics2.MetricsRecord; import java.util.StringJoiner; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java index cdd0ba4275ce2..a297072d236d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java @@ -22,7 +22,7 @@ import javax.management.MBeanAttributeInfo; import javax.management.MBeanInfo; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java index 5345c1baf88fd..4b4b70bd8e607 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java @@ -21,8 +21,8 @@ import java.util.Iterator; import java.util.List; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java index 976f16bedd81b..2d22b75841b33 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java @@ -29,14 +29,15 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; -import com.google.common.collect.Iterables; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.configuration2.Configuration; import org.apache.commons.configuration2.PropertiesConfiguration; import org.apache.commons.configuration2.SubsetConfiguration; +import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler; import org.apache.commons.configuration2.ex.ConfigurationException; import org.apache.commons.configuration2.io.FileHandler; import org.apache.hadoop.metrics2.MetricsFilter; @@ -111,6 +112,7 @@ static MetricsConfig loadFirst(String prefix, String... fileNames) { for (String fname : fileNames) { try { PropertiesConfiguration pcf = new PropertiesConfiguration(); + pcf.setListDelimiterHandler(new DefaultListDelimiterHandler(',')); FileHandler fh = new FileHandler(pcf); fh.setFileName(fname); fh.load(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java index f66ec5cb99ffb..19e4c3b6d4187 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java @@ -21,7 +21,7 @@ import java.util.Collections; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordFiltered.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordFiltered.java index 5674dfbf68d26..58ebbcffa8e58 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordFiltered.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordFiltered.java @@ -21,7 +21,7 @@ import java.util.Iterator; import java.util.Collection; -import com.google.common.collect.AbstractIterator; +import org.apache.hadoop.thirdparty.com.google.common.collect.AbstractIterator; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsFilter; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java index 8eb6cb8f8678a..14b930e830d77 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java @@ -20,7 +20,7 @@ import java.util.List; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.AbstractMetric; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index f2e607b577619..836d9d5cf816f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -22,7 +22,7 @@ import java.util.Random; import java.util.concurrent.*; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java index f12ec67dd6125..852f31995a27b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java @@ -29,10 +29,10 @@ import javax.management.ObjectName; import javax.management.ReflectionException; -import static com.google.common.base.Preconditions.*; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsFilter; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index 624edc96b8ae7..a6edf08e5a717 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -30,10 +30,10 @@ import java.util.TimerTask; import javax.management.ObjectName; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.annotations.VisibleForTesting; -import static com.google.common.base.Preconditions.*; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import static 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.commons.configuration2.PropertiesConfiguration; import org.apache.commons.math3.util.ArithmeticUtils; @@ -273,7 +273,11 @@ void registerSource(String name, String desc, MetricsSource source) { T register(final String name, final String description, final T sink) { LOG.debug(name +", "+ description); if (allSinks.containsKey(name)) { - LOG.warn("Sink "+ name +" already exists!"); + if(sinks.get(name) == null) { + registerSink(name, description, sink); + } else { + LOG.warn("Sink "+ name +" already exists!"); + } return sink; } allSinks.put(name, sink); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java index 935f47f3a09ee..83e458f06c68c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java @@ -27,7 +27,7 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The default metrics system singleton. This class is used by all the daemon diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java index 9b54adcb4314f..96eb5026be179 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java @@ -20,7 +20,7 @@ import java.lang.reflect.Method; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.metrics2.MetricsException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java index e3adc821de5c4..e86398f544edf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java @@ -18,12 +18,12 @@ package org.apache.hadoop.metrics2.lib; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.metrics2.MetricsInfo; import java.util.StringJoiner; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Making implementing metric info a little easier diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java index 6227d0954fb6d..b71f7f8cc5ee0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.MetricsException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java index 1fcede464dacd..f400f02d256f2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java @@ -22,7 +22,7 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsCollector; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java index 8ba72343f2249..e616bb6d934dd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java index 03384f69147b7..6c77e97353869 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java index 910805e4246d0..0e69c268c94cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java @@ -35,8 +35,8 @@ import org.apache.hadoop.metrics2.util.QuantileEstimator; import org.apache.hadoop.metrics2.util.SampleQuantiles; -import com.google.common.annotations.VisibleForTesting; -import 
com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Watches a stream of long values, maintaining online estimates of specific diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java index 994eb13e08dae..c31c2e67f8f31 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java @@ -21,8 +21,8 @@ import java.lang.reflect.Method; import java.util.Set; -import static com.google.common.base.Preconditions.*; -import com.google.common.collect.Sets; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java index 5fe0083aa5dce..7795343de3c20 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.lang.ref.WeakReference; import java.lang.reflect.Method; import java.util.Iterator; @@ -163,6 +163,7 @@ private synchronized MutableRate addMetricIfNotExists(String name) { MutableRate metric = globalMetrics.get(name); if (metric == null) { metric = new MutableRate(name + typePrefix, name + typePrefix, false); + metric.setUpdateTimeStamp(true); globalMetrics.put(name, metric); } return metric; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java index 6803d11d1ca38..17233629c7d26 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java @@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -39,8 +39,9 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.Time; import javax.annotation.Nullable; @@ -77,13 +78,26 @@ public class MutableRollingAverages extends MutableMetric implements Closeable { private final String avgInfoDescTemplate; private int numWindows; + /** + * This class maintains sub-sum and sub-total of SampleStat. + */ private static class SumAndCount { private final double sum; private final long count; - - SumAndCount(final double sum, final long count) { + private final long snapshotTimeStamp; + + /** + * Constructor for {@link SumAndCount}. + * + * @param sum sub-sum in sliding windows + * @param count sub-total in sliding windows + * @param snapshotTimeStamp when is a new SampleStat snapshot. + */ + SumAndCount(final double sum, final long count, + final long snapshotTimeStamp) { this.sum = sum; this.count = count; + this.snapshotTimeStamp = snapshotTimeStamp; } public double getSum() { @@ -93,6 +107,10 @@ public double getSum() { public long getCount() { return count; } + + public long getSnapshotTimeStamp() { + return snapshotTimeStamp; + } } /** @@ -110,6 +128,16 @@ public long getCount() { private static final long WINDOW_SIZE_MS_DEFAULT = 300_000; private static final int NUM_WINDOWS_DEFAULT = 36; + /** + * Time duration after which a record is considered stale. + * {@link MutableRollingAverages} should be time-sensitive, and it should use + * the time window length(i.e. NUM_WINDOWS_DEFAULT * WINDOW_SIZE_MS_DEFAULT) + * as the valid time to make sure some too old record won't be use to compute + * average. + */ + private long recordValidityMs = + NUM_WINDOWS_DEFAULT * WINDOW_SIZE_MS_DEFAULT; + /** * Constructor for {@link MutableRollingAverages}. * @param metricValueName @@ -231,7 +259,8 @@ public LinkedBlockingDeque apply(String k) { }); final SumAndCount sumAndCount = new SumAndCount( rate.lastStat().total(), - rate.lastStat().numSamples()); + rate.lastStat().numSamples(), + rate.getSnapshotTimeStamp()); /* put newest sum and count to the end */ if (!deque.offerLast(sumAndCount)) { deque.pollFirst(); @@ -267,8 +296,11 @@ public synchronized Map getStats(long minSamples) { long totalCount = 0; for (final SumAndCount sumAndCount : entry.getValue()) { - totalCount += sumAndCount.getCount(); - totalSum += sumAndCount.getSum(); + if (Time.monotonicNow() - sumAndCount.getSnapshotTimeStamp() + < recordValidityMs) { + totalCount += sumAndCount.getCount(); + totalSum += sumAndCount.getSum(); + } } if (totalCount > minSamples) { @@ -277,4 +309,12 @@ public synchronized Map getStats(long minSamples) { } return stats; } + + /** + * Use for test only. 
+ */ + @VisibleForTesting + public synchronized void setRecordValidityMs(long value) { + this.recordValidityMs = value; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java index 5ef31785a61e8..e04b4b58ece0b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java @@ -24,6 +24,8 @@ import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.SampleStat; +import org.apache.hadoop.util.Time; + import static org.apache.hadoop.metrics2.lib.Interns.*; /** @@ -47,7 +49,9 @@ public class MutableStat extends MutableMetric { private final SampleStat prevStat = new SampleStat(); private final SampleStat.MinMax minMax = new SampleStat.MinMax(); private long numSamples = 0; + private long snapshotTimeStamp = 0; private boolean extended = false; + private boolean updateTimeStamp = false; /** * Construct a sample statistics metric @@ -100,6 +104,13 @@ public synchronized void setExtended(boolean extended) { this.extended = extended; } + /** + * Set whether to update the snapshot time or not. + * @param updateTimeStamp enable update stats snapshot timestamp + */ + public synchronized void setUpdateTimeStamp(boolean updateTimeStamp) { + this.updateTimeStamp = updateTimeStamp; + } /** * Add a number of samples and their sum to the running stat * @@ -115,7 +126,7 @@ public synchronized void add(long numSamples, long sum) { } /** - * Add a snapshot to the metric + * Add a snapshot to the metric. 
* @param value of the metric */ public synchronized void add(long value) { @@ -142,6 +153,9 @@ public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) { if (numSamples > 0) { intervalStat.copyTo(prevStat); intervalStat.reset(); + if (updateTimeStamp) { + snapshotTimeStamp = Time.monotonicNow(); + } } clearChanged(); } @@ -164,6 +178,12 @@ public void resetMinMax() { minMax.reset(); } + /** + * Return the SampleStat snapshot timestamp + */ + public long getSnapshotTimeStamp() { + return snapshotTimeStamp; + } @Override public String toString() { return lastStat().toString(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/UniqueNames.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/UniqueNames.java index 0df852d46b371..2508ee27bbee8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/UniqueNames.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/UniqueNames.java @@ -20,8 +20,8 @@ import java.util.Map; -import com.google.common.base.Joiner; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java index 1d330c74ab46a..4dfe9c6854049 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.sink; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.Closeable; import java.io.IOException; import java.io.PrintStream; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java index f19a2be0b4195..816940b109879 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java @@ -27,8 +27,8 @@ import java.util.List; import java.util.concurrent.ConcurrentHashMap; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java index 1b50498bbaf5a..ed839a36199df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java @@ -28,12 +28,12 
@@ import javax.management.MBeanServer; import javax.management.ObjectName; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java index 6cfbc39f896f4..bf6e910c3e232 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.util; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.AbstractMetric; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java index 40e31bd841ba8..7a100edc228fb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; /** * Specifies a quantile (with error bounds) to be watched by a diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java index 0c5d98f2374ed..7383b2d90d70b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java @@ -26,9 +26,9 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java index 19a64a9f9d171..2bd49e9f211ba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java index 531ad80f41722..ffe7ee5676aee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java @@ -18,7 +18,7 @@ package org.apache.hadoop.net; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index d98254cb1ca25..3715e3cdfb6d7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -37,6 +37,7 @@ import java.net.UnknownHostException; import java.net.ConnectException; import java.nio.channels.SocketChannel; +import java.nio.channels.UnresolvedAddressException; import java.util.Map.Entry; import java.util.regex.Pattern; import java.util.*; @@ -44,6 +45,7 @@ import javax.net.SocketFactory; +import org.apache.hadoop.security.AccessControlException; import org.apache.commons.net.util.SubnetUtils; import org.apache.commons.net.util.SubnetUtils.SubnetInfo; import org.apache.hadoop.classification.InterfaceAudience; @@ -55,7 +57,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -534,6 +536,8 @@ public static void connect(Socket socket, } } catch (SocketTimeoutException ste) { throw new ConnectTimeoutException(ste.getMessage()); + } catch (UnresolvedAddressException uae) { + throw new UnknownHostException(uae.getMessage()); } // There is a very rare case allowed by the TCP specification, such that @@ -803,6 +807,11 @@ public static IOException wrapException(final String destHost, + " failed on socket exception: " + exception + ";" + see("SocketException")); + } else if (exception instanceof AccessControlException) { + return wrapWithMessage(exception, + "Call From " + + localHost + " to " + destHost + ":" + destPort + + " failed: " + exception.getMessage()); } else { // 1. Return instance of same type with exception msg if Exception has a // String constructor. 
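[Editor's note] The NetUtils changes above do two things: connect() now converts the unchecked java.nio UnresolvedAddressException into a checked UnknownHostException, and wrapException() adds the local and remote host:port to an AccessControlException before rethrowing it. A minimal, self-contained sketch of the first conversion pattern follows; the class and method names here are hypothetical stand-ins, not code from the patch.

  import java.io.IOException;
  import java.net.InetSocketAddress;
  import java.net.UnknownHostException;
  import java.nio.channels.SocketChannel;
  import java.nio.channels.UnresolvedAddressException;

  public class ResolveAwareConnect {
    // Connect to host:port, mapping the unchecked UnresolvedAddressException
    // (raised when the address could not be resolved) to UnknownHostException,
    // so callers that only handle IOException keep working.
    static SocketChannel connect(String host, int port) throws IOException {
      SocketChannel channel = SocketChannel.open();
      try {
        channel.connect(new InetSocketAddress(host, port));
        return channel;
      } catch (UnresolvedAddressException uae) {
        channel.close();
        throw new UnknownHostException(host + ":" + port);
      }
    }
  }

The patch applies the same mapping inside NetUtils.connect(), so DNS resolution failures surface through the usual IOException-based error handling instead of as a raw runtime exception.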
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index aae56dd98daa4..e27423125d32e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.net; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -29,6 +28,8 @@ import org.slf4j.LoggerFactory; import java.util.*; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; @@ -52,6 +53,8 @@ public class NetworkTopology { private static final char PATH_SEPARATOR = '/'; private static final String PATH_SEPARATOR_STR = "/"; private static final String ROOT = "/"; + private static final AtomicReference RANDOM_REF = + new AtomicReference<>(); public static class InvalidTopologyException extends RuntimeException { private static final long serialVersionUID = 1L; @@ -396,17 +399,12 @@ static public int getDistanceByPath(Node node1, Node node2) { * @exception IllegalArgumentException when either node1 or node2 is null, or * node1 or node2 do not belong to the cluster */ - public boolean isOnSameRack( Node node1, Node node2) { + public boolean isOnSameRack(Node node1, Node node2) { if (node1 == null || node2 == null) { return false; } - - netlock.readLock().lock(); - try { - return isSameParents(node1, node2); - } finally { - netlock.readLock().unlock(); - } + + return isSameParents(node1, node2); } /** @@ -440,11 +438,14 @@ protected boolean isSameParents(Node node1, Node node2) { return node1.getParent()==node2.getParent(); } - private static final Random r = new Random(); - @VisibleForTesting void setRandomSeed(long seed) { - r.setSeed(seed); + RANDOM_REF.set(new Random(seed)); + } + + Random getRandom() { + Random random = RANDOM_REF.get(); + return (random == null) ? ThreadLocalRandom.current() : random; } /** @@ -563,6 +564,7 @@ private Node chooseRandom(final InnerNode parentNode, totalInScopeNodes, availableNodes); return null; } + Random r = getRandom(); if (excludedNodes == null || excludedNodes.isEmpty()) { // if there are no excludedNodes, randomly choose a node final int index = r.nextInt(totalInScopeNodes); @@ -879,7 +881,7 @@ public void sortByDistance(Node reader, Node[] nodes, int activeLen) { * This method is called if the reader is a datanode, * so nonDataNodeReader flag is set to false. */ - sortByDistance(reader, nodes, activeLen, list -> Collections.shuffle(list)); + sortByDistance(reader, nodes, activeLen, null); } /** @@ -922,8 +924,7 @@ public void sortByDistanceUsingNetworkLocation(Node reader, Node[] nodes, * This method is called if the reader is not a datanode, * so nonDataNodeReader flag is set to true. 
*/ - sortByDistanceUsingNetworkLocation(reader, nodes, activeLen, - list -> Collections.shuffle(list)); + sortByDistanceUsingNetworkLocation(reader, nodes, activeLen, null); } /** @@ -950,6 +951,7 @@ public void sortByDistanceUsingNetworkLocation(Node reader, *

    * As an additional twist, we also randomize the nodes at each network * distance. This helps with load balancing when there is data skew. + * And it helps choose node with more fast storage type. * * @param reader Node where data will be read * @param nodes Available replicas with the requested data @@ -960,35 +962,28 @@ private void sortByDistance(Node reader, T[] nodes, int activeLen, Consumer> secondarySort, boolean nonDataNodeReader) { /** Sort weights for the nodes array */ - int[] weights = new int[activeLen]; - for (int i=0; i> weightedNodeTree = + new TreeMap<>(); + int nWeight; + for (int i = 0; i < activeLen; i++) { + if (nonDataNodeReader) { + nWeight = getWeightUsingNetworkLocation(reader, nodes[i]); } else { - weights[i] = getWeight(reader, nodes[i]); - } - } - // Add weight/node pairs to a TreeMap to sort - TreeMap> tree = new TreeMap<>(); - for (int i=0; i list = tree.get(weight); - if (list == null) { - list = Lists.newArrayListWithExpectedSize(1); - tree.put(weight, list); + nWeight = getWeight(reader, nodes[i]); } - list.add(node); + weightedNodeTree.computeIfAbsent( + nWeight, k -> new ArrayList<>(1)).add(nodes[i]); } - // Sort nodes which have the same weight using secondarySort. int idx = 0; - for (List list: tree.values()) { - if (list != null) { - secondarySort.accept(list); - for (T n: list) { - nodes[idx] = n; - idx++; - } + // Sort nodes which have the same weight using secondarySort. + for (List nodesList : weightedNodeTree.values()) { + Collections.shuffle(nodesList, getRandom()); + if (secondarySort != null) { + // a secondary sort breaks the tie between nodes. + secondarySort.accept(nodesList); + } + for (T n : nodesList) { + nodes[idx++] = n; } } Preconditions.checkState(idx == activeLen, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java index f5cbe17519d60..45f776e692ac4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java @@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A wrapper stream around a socket which allows setting of its timeout. 
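[Editor's note] For reference, the grouping-and-shuffle logic of the rewritten sortByDistance() above can be summarised by the following standalone sketch; the generic names (WeightedShuffle, sortByWeight, weightFn) are illustrative stand-ins and do not appear in the patch.

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
  import java.util.Random;
  import java.util.TreeMap;
  import java.util.function.Consumer;
  import java.util.function.ToIntFunction;

  public final class WeightedShuffle {
    // Reorder the first activeLen items by ascending weight; items with equal
    // weight are shuffled, then optionally re-ordered by a secondary sort,
    // mirroring the TreeMap + computeIfAbsent structure used above.
    static <T> void sortByWeight(T[] items, int activeLen,
        ToIntFunction<T> weightFn, Consumer<List<T>> secondarySort, Random rnd) {
      TreeMap<Integer, List<T>> byWeight = new TreeMap<>();
      for (int i = 0; i < activeLen; i++) {
        byWeight.computeIfAbsent(weightFn.applyAsInt(items[i]),
            k -> new ArrayList<>(1)).add(items[i]);
      }
      int idx = 0;
      for (List<T> tied : byWeight.values()) {
        Collections.shuffle(tied, rnd);   // randomize equal-weight replicas
        if (secondarySort != null) {
          secondarySort.accept(tied);     // tie-breaker, e.g. storage type
        }
        for (T item : tied) {
          items[idx++] = item;
        }
      }
    }
  }

In the patch itself the weight is the network distance (or the network-location distance for non-datanode readers) and the Random comes from getRandom(), which returns ThreadLocalRandom unless a test has pinned a seed via setRandomSeed().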
If the diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java index 9693220438dd6..99fde5cb50407 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java @@ -32,7 +32,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.CloseableReferenceCount; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index e36399ff96c01..17c7d4b65c401 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -35,9 +35,9 @@ import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.util.NativeCodeLoader; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java index b29278bd20751..5ff57788a85cb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java @@ -37,18 +37,18 @@ import org.apache.htrace.core.TraceScope; import org.apache.htrace.core.Tracer; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Ticker; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Ticker; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java index a30e4a84dd86b..1431ed5d0e907 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IngressPortBasedResolver.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.net.InetAddress; import java.util.Collection; import java.util.HashMap; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java index 8e71f69c858d1..3e89c27a78d59 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java @@ -58,7 +58,7 @@ import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; -import com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java index d7a68210c7dc2..23efffcd98b0a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java @@ -24,7 +24,7 @@ import java.net.URISyntaxException; import java.net.URL; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java index 6accf2fdced02..6af28f155c466 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RuleBasedLdapGroupsMapping.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security; -import 
com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java index 215f473b9fcc9..7dc28fb277aa8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java @@ -71,7 +71,7 @@ import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.util.ProtoUtil; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.ByteString; import com.google.re2j.Pattern; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index aa12b93be95b4..3b9e9c53e44f4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -57,8 +57,8 @@ import org.xbill.DNS.ResolverConfig; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; /** * Security Utils. @@ -380,7 +380,25 @@ public static void setSecurityInfoProviders(SecurityInfo... providers) { } return null; } - + + /** + * Look up the client principal for a given protocol. It searches all known + * SecurityInfo providers. + * @param protocol the protocol class to get the information for + * @param conf configuration object + * @return client principal or null if it has no client principal defined. + */ + public static String getClientPrincipal(Class protocol, + Configuration conf) { + String user = null; + KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf); + if (krbInfo != null) { + String key = krbInfo.clientPrincipal(); + user = (key != null && !key.isEmpty()) ? conf.get(key) : null; + } + return user; + } + /** * Look up the TokenInfo for a given protocol. It searches all known * SecurityInfo providers. 
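[Editor's note] The new SecurityUtil.getClientPrincipal() above resolves the configuration key named by a protocol's @KerberosInfo(clientPrincipal = ...) annotation. A rough usage sketch, assuming the default annotation-based SecurityInfo provider is on the classpath; the protocol interface and configuration keys below are hypothetical.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.KerberosInfo;
  import org.apache.hadoop.security.SecurityUtil;

  public class ClientPrincipalLookup {
    // Hypothetical protocol: clientPrincipal names the config key holding the
    // principal that is allowed to call this protocol.
    @KerberosInfo(
        serverPrincipal = "my.service.kerberos.principal",
        clientPrincipal = "my.client.kerberos.principal")
    interface MyAdminProtocol {
    }

    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.set("my.client.kerberos.principal", "admin/_HOST@EXAMPLE.COM");
      // Returns conf.get("my.client.kerberos.principal"), or null when the
      // protocol defines no client principal key or the key is unset.
      String principal =
          SecurityUtil.getClientPrincipal(MyAdminProtocol.class, conf);
      System.out.println("client principal = " + principal);
    }
  }

ServiceAuthorizationManager.authorize(), changed further down in this patch, calls the same helper and then expands any _HOST pattern with getServerPrincipal() before comparing it to the caller's user name.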
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java index 92ea83d8f1da5..e517bad4bb42d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java @@ -32,9 +32,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.BiMap; -import com.google.common.collect.HashBiMap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.BiMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashBiMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java index 31f43980552f2..96e4402e5b9c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java @@ -23,8 +23,8 @@ import java.util.StringTokenizer; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 8c84a8d31a063..b783f828faacb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -28,7 +28,7 @@ import static org.apache.hadoop.util.PlatformName.IBM_JAVA; import static org.apache.hadoop.util.StringUtils.getTrimmedStringCollection; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; @@ -530,6 +530,14 @@ private void setLogin(LoginContext login) { user.setLogin(login); } + /** + * Set the last login time for logged in user + * @param loginTime the number of milliseconds since the beginning of time + */ + private void setLastLogin(long loginTime) { + user.setLastLogin(loginTime); + } + /** * Create a UserGroupInformation for the given subject. * This does not change the subject or acquire new credentials. @@ -1225,7 +1233,29 @@ public void reloginFromKeytab() throws IOException { reloginFromKeytab(false); } + /** + * Force re-Login a user in from a keytab file irrespective of the last login + * time. Loads a user identity from a keytab file and logs them in. 
They + * become the currently logged-in user. This method assumes that + * {@link #loginUserFromKeytab(String, String)} had happened already. The + * Subject field of this UserGroupInformation object is updated to have the + * new credentials. + * + * @throws IOException + * @throws KerberosAuthException on a failure + */ + @InterfaceAudience.Public + @InterfaceStability.Evolving + public void forceReloginFromKeytab() throws IOException { + reloginFromKeytab(false, true); + } + private void reloginFromKeytab(boolean checkTGT) throws IOException { + reloginFromKeytab(checkTGT, false); + } + + private void reloginFromKeytab(boolean checkTGT, boolean ignoreLastLoginTime) + throws IOException { if (!shouldRelogin() || !isFromKeytab()) { return; } @@ -1240,7 +1270,7 @@ private void reloginFromKeytab(boolean checkTGT) throws IOException { return; } } - relogin(login); + relogin(login, ignoreLastLoginTime); } /** @@ -1261,25 +1291,27 @@ public void reloginFromTicketCache() throws IOException { if (login == null) { throw new KerberosAuthException(MUST_FIRST_LOGIN); } - relogin(login); + relogin(login, false); } - private void relogin(HadoopLoginContext login) throws IOException { + private void relogin(HadoopLoginContext login, boolean ignoreLastLoginTime) + throws IOException { // ensure the relogin is atomic to avoid leaving credentials in an // inconsistent state. prevents other ugi instances, SASL, and SPNEGO // from accessing or altering credentials during the relogin. synchronized(login.getSubjectLock()) { // another racing thread may have beat us to the relogin. if (login == getLogin()) { - unprotectedRelogin(login); + unprotectedRelogin(login, ignoreLastLoginTime); } } } - private void unprotectedRelogin(HadoopLoginContext login) throws IOException { + private void unprotectedRelogin(HadoopLoginContext login, + boolean ignoreLastLoginTime) throws IOException { assert Thread.holdsLock(login.getSubjectLock()); long now = Time.now(); - if (!hasSufficientTimeElapsed(now)) { + if (!hasSufficientTimeElapsed(now) && !ignoreLastLoginTime) { return; } // register most recent relogin attempt @@ -1946,6 +1978,7 @@ private static UserGroupInformation doSubjectLogin( if (subject == null) { params.put(LoginParam.PRINCIPAL, ugi.getUserName()); ugi.setLogin(login); + ugi.setLastLogin(Time.now()); } return ugi; } catch (LoginException le) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java index df783f16edb90..10f8da4ec5781 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.ProviderUtils; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java index 603772444bcef..f172f0828eb32 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java @@ -25,7 +25,7 @@ import java.util.Arrays; import java.util.List; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java index b766d5c37fa2f..f2589308640c9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java @@ -18,6 +18,7 @@ package org.apache.hadoop.security.authorize; +import java.net.InetAddress; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -30,7 +31,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.MachineList; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceStability.Unstable @InterfaceAudience.Public @@ -105,8 +106,8 @@ public Configuration getConf() { } @Override - public void authorize(UserGroupInformation user, - String remoteAddress) throws AuthorizationException { + public void authorize(UserGroupInformation user, + InetAddress remoteAddress) throws AuthorizationException { if (user == null) { throw new IllegalArgumentException("user is null."); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java index 8b483f0336f3d..eff77d8942cf7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java @@ -18,6 +18,9 @@ package org.apache.hadoop.security.authorize; +import java.net.InetAddress; +import java.net.UnknownHostException; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; @@ -38,12 +41,29 @@ public interface ImpersonationProvider extends Configurable { public void init(String configurationPrefix); /** - * Authorize the superuser which is doing doAs - * + * Authorize the superuser which is doing doAs. + * {@link #authorize(UserGroupInformation, InetAddress)} should + * be preferred to avoid possibly re-resolving the ip address. + * @param user ugi of the effective or proxy user which contains a real user. + * @param remoteAddress the ip address of client. + * @throws AuthorizationException + */ + default void authorize(UserGroupInformation user, String remoteAddress) + throws AuthorizationException { + try { + authorize(user, InetAddress.getByName(remoteAddress)); + } catch (UnknownHostException e) { + throw new AuthorizationException(e); + } + } + + /** + * Authorize the superuser which is doing doAs. 
+ * * @param user ugi of the effective or proxy user which contains a real user * @param remoteAddress the ip address of client * @throws AuthorizationException */ - public void authorize(UserGroupInformation user, String remoteAddress) + void authorize(UserGroupInformation user, InetAddress remoteAddress) throws AuthorizationException; } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java index 97a7f080fbdf9..e7e2a05a878e9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java @@ -18,7 +18,9 @@ package org.apache.hadoop.security.authorize; -import com.google.common.base.Preconditions; +import java.net.InetAddress; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -26,7 +28,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceStability.Unstable @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive"}) @@ -86,22 +88,41 @@ public static void refreshSuperUserGroupsConfiguration(Configuration conf) { } /** - * Authorize the superuser which is doing doAs - * + * Authorize the superuser which is doing doAs. + * {@link #authorize(UserGroupInformation, InetAddress)} should be preferred + * to avoid possibly re-resolving the ip address. + * * @param user ugi of the effective or proxy user which contains a real user * @param remoteAddress the ip address of client * @throws AuthorizationException */ public static void authorize(UserGroupInformation user, String remoteAddress) throws AuthorizationException { - if (sip==null) { - // In a race situation, It is possible for multiple threads to satisfy this condition. + getSip().authorize(user, remoteAddress); + } + + /** + * Authorize the superuser which is doing doAs. + * + * @param user ugi of the effective or proxy user which contains a real user + * @param remoteAddress the inet address of client + * @throws AuthorizationException + */ + public static void authorize(UserGroupInformation user, + InetAddress remoteAddress) throws AuthorizationException { + getSip().authorize(user, remoteAddress); + } + + private static ImpersonationProvider getSip() { + if (sip == null) { + // In a race situation, It is possible for multiple threads to satisfy + // this condition. // The last assignment will prevail. - refreshSuperUserGroupsConfiguration(); + refreshSuperUserGroupsConfiguration(); } - sip.authorize(user, remoteAddress); + return sip; } - + /** * This function is kept to provide backward compatibility. 
* @param user @@ -118,7 +139,7 @@ public static void authorize(UserGroupInformation user, @VisibleForTesting public static DefaultImpersonationProvider getDefaultImpersonationProvider() { - return ((DefaultImpersonationProvider)sip); + return ((DefaultImpersonationProvider) getSip()); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java index a264eb4dcd9fb..c83afc7fe4b92 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java @@ -28,12 +28,11 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.MachineList; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,21 +100,19 @@ public void authorize(UserGroupInformation user, String clientPrincipal = null; if (UserGroupInformation.isSecurityEnabled()) { // get client principal key to verify (if available) - KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf); - if (krbInfo != null) { - String clientKey = krbInfo.clientPrincipal(); - if (clientKey != null && !clientKey.isEmpty()) { - try { - clientPrincipal = SecurityUtil.getServerPrincipal( - conf.get(clientKey), addr); - } catch (IOException e) { - throw (AuthorizationException) new AuthorizationException( - "Can't figure out Kerberos principal name for connection from " - + addr + " for user=" + user + " protocol=" + protocol) - .initCause(e); - } + clientPrincipal = SecurityUtil.getClientPrincipal(protocol, conf); + try { + if (clientPrincipal != null) { + clientPrincipal = + SecurityUtil.getServerPrincipal(clientPrincipal, addr); } + } catch (IOException e) { + throw (AuthorizationException) new AuthorizationException( + "Can't figure out Kerberos principal name for connection from " + + addr + " for user=" + user + " protocol=" + protocol) + .initCause(e); } + } if((clientPrincipal != null && !clientPrincipal.equals(user.getUserName())) || acls.length != 2 || !acls[0].isUserAllowed(user) || acls[1].isUserAllowed(user)) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java index 60c2864bbe539..6ba651c13da0b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java @@ -36,7 +36,7 @@ import org.apache.commons.lang3.StringUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java index 59cb0d6599595..b81ed8e90155e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/RestCsrfPreventionFilter.java @@ -37,6 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.eclipse.jetty.server.Response; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -271,6 +272,10 @@ public void proceed() throws IOException, ServletException { @Override public void sendError(int code, String message) throws IOException { + if (httpResponse instanceof Response) { + ((Response)httpResponse).setStatusWithReason(code, message); + } + httpResponse.sendError(code, message); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java index c961364aa1124..9d7afa933b65e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.net.InetAddress; import java.net.Socket; -import java.net.SocketException; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -31,11 +30,9 @@ import javax.net.ssl.SSLSocket; import javax.net.ssl.SSLSocketFactory; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.wildfly.openssl.OpenSSLProvider; -import org.wildfly.openssl.SSL; - /** * A {@link SSLSocketFactory} that can delegate to various SSL implementations. @@ -60,8 +57,8 @@ *

    * * In order to load OpenSSL, applications must ensure the wildfly-openssl - * artifact is on the classpath. Currently, only ABFS and S3A provide - * wildfly-openssl as a runtime dependency. + * artifact is on the classpath. Currently, only ABFS declares + * wildfly-openssl as an explicit dependency. */ public final class DelegatingSSLSocketFactory extends SSLSocketFactory { @@ -110,7 +107,16 @@ public static synchronized void initializeDefaultFactory( } /** - * Singletone instance of the SSLSocketFactory. + * For testing only: reset the socket factory. + */ + @VisibleForTesting + public static synchronized void resetDefaultFactory() { + LOG.info("Resetting default SSL Socket Factory"); + instance = null; + } + + /** + * Singleton instance of the SSLSocketFactory. * * SSLSocketFactory must be initialized with appropriate SSLChannelMode * using initializeDefaultFactory method. @@ -126,9 +132,7 @@ private DelegatingSSLSocketFactory(SSLChannelMode preferredChannelMode) throws IOException { try { initializeSSLContext(preferredChannelMode); - } catch (NoSuchAlgorithmException e) { - throw new IOException(e); - } catch (KeyManagementException e) { + } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new IOException(e); } @@ -146,42 +150,23 @@ private DelegatingSSLSocketFactory(SSLChannelMode preferredChannelMode) } private void initializeSSLContext(SSLChannelMode preferredChannelMode) - throws NoSuchAlgorithmException, KeyManagementException { + throws NoSuchAlgorithmException, KeyManagementException, IOException { + LOG.debug("Initializing SSL Context to channel mode {}", + preferredChannelMode); switch (preferredChannelMode) { case Default: - if (!openSSLProviderRegistered) { - OpenSSLProvider.register(); - openSSLProviderRegistered = true; - } try { - java.util.logging.Logger logger = java.util.logging.Logger.getLogger( - SSL.class.getName()); - logger.setLevel(Level.WARNING); - ctx = SSLContext.getInstance("openssl.TLS"); - ctx.init(null, null, null); - // Strong reference needs to be kept to logger until initialization of - // SSLContext finished (see HADOOP-16174): - logger.setLevel(Level.INFO); + bindToOpenSSLProvider(); channelMode = SSLChannelMode.OpenSSL; - } catch (NoSuchAlgorithmException e) { - LOG.debug("Failed to load OpenSSL. Falling back to the JSSE default."); + } catch (LinkageError | NoSuchAlgorithmException | RuntimeException e) { + LOG.debug("Failed to load OpenSSL. Falling back to the JSSE default.", + e); ctx = SSLContext.getDefault(); channelMode = SSLChannelMode.Default_JSSE; } break; case OpenSSL: - if (!openSSLProviderRegistered) { - OpenSSLProvider.register(); - openSSLProviderRegistered = true; - } - java.util.logging.Logger logger = java.util.logging.Logger.getLogger( - SSL.class.getName()); - logger.setLevel(Level.WARNING); - ctx = SSLContext.getInstance("openssl.TLS"); - ctx.init(null, null, null); - // Strong reference needs to be kept to logger until initialization of - // SSLContext finished (see HADOOP-16174): - logger.setLevel(Level.INFO); + bindToOpenSSLProvider(); channelMode = SSLChannelMode.OpenSSL; break; case Default_JSSE: @@ -193,11 +178,38 @@ private void initializeSSLContext(SSLChannelMode preferredChannelMode) channelMode = SSLChannelMode.Default_JSSE_with_GCM; break; default: - throw new NoSuchAlgorithmException("Unknown channel mode: " + throw new IOException("Unknown channel mode: " + preferredChannelMode); } } + /** + * Bind to the OpenSSL provider via wildfly. 
+ * This MUST be the only place where wildfly classes are referenced, + * so ensuring that any linkage problems only surface here where they may + * be caught by the initialization code. + */ + private void bindToOpenSSLProvider() + throws NoSuchAlgorithmException, KeyManagementException { + if (!openSSLProviderRegistered) { + LOG.debug("Attempting to register OpenSSL provider"); + org.wildfly.openssl.OpenSSLProvider.register(); + openSSLProviderRegistered = true; + } + // Strong reference needs to be kept to logger until initialization of + // SSLContext finished (see HADOOP-16174): + java.util.logging.Logger logger = java.util.logging.Logger.getLogger( + "org.wildfly.openssl.SSL"); + Level originalLevel = logger.getLevel(); + try { + logger.setLevel(Level.WARNING); + ctx = SSLContext.getInstance("openssl.TLS"); + ctx.init(null, null, null); + } finally { + logger.setLevel(originalLevel); + } + } + public String getProviderName() { return providerName; } @@ -212,21 +224,26 @@ public String[] getSupportedCipherSuites() { return ciphers.clone(); } + /** + * Get the channel mode of this instance. + * @return a channel mode. + */ + public SSLChannelMode getChannelMode() { + return channelMode; + } + public Socket createSocket() throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(); - configureSocket(ss); - return ss; + return configureSocket(factory.createSocket()); } @Override public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(s, host, port, autoClose); - configureSocket(ss); - return ss; + return configureSocket( + factory.createSocket(s, host, port, autoClose)); } @Override @@ -234,52 +251,41 @@ public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory - .createSocket(address, port, localAddress, localPort); - - configureSocket(ss); - return ss; + return configureSocket(factory + .createSocket(address, port, localAddress, localPort)); } @Override public Socket createSocket(String host, int port, InetAddress localHost, int localPort) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory - .createSocket(host, port, localHost, localPort); - configureSocket(ss); - - return ss; + return configureSocket(factory + .createSocket(host, port, localHost, localPort)); } @Override public Socket createSocket(InetAddress host, int port) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(host, port); - - configureSocket(ss); - return ss; + return configureSocket(factory.createSocket(host, port)); } @Override public Socket createSocket(String host, int port) throws IOException { SSLSocketFactory factory = ctx.getSocketFactory(); - SSLSocket ss = (SSLSocket) factory.createSocket(host, port); - - configureSocket(ss); - return ss; + return configureSocket(factory.createSocket(host, port)); } - private void configureSocket(SSLSocket ss) throws SocketException { - ss.setEnabledCipherSuites(ciphers); + private Socket configureSocket(Socket socket) { + ((SSLSocket) socket).setEnabledCipherSuites(ciphers); + return socket; } private String[] alterCipherList(String[] defaultCiphers) { - ArrayList preferredSuits = new 
ArrayList<>(); + ArrayList preferredSuites = new ArrayList<>(); // Remove GCM mode based ciphers from the supported list. for (int i = 0; i < defaultCiphers.length; i++) { @@ -287,11 +293,11 @@ private String[] alterCipherList(String[] defaultCiphers) { LOG.debug("Removed Cipher - {} from list of enabled SSLSocket ciphers", defaultCiphers[i]); } else { - preferredSuits.add(defaultCiphers[i]); + preferredSuites.add(defaultCiphers[i]); } } - ciphers = preferredSuits.toArray(new String[0]); + ciphers = preferredSuites.toArray(new String[0]); return ciphers; } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index 3531173bb72f7..b184e4a152b8b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security.ssl; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java index b2f0118aaf5c6..7430477932292 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java index 4f0f6fc4d444a..e521a7c07b92f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java @@ -18,8 +18,8 @@ package org.apache.hadoop.security.token; -import com.google.common.collect.Maps; -import com.google.common.primitives.Bytes; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.HadoopIllegalArgumentException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java index 6dfe52a83bf68..3f27e45af8191 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java @@ -32,7 +32,7 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.TokenIdentifier; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Public @InterfaceStability.Evolving @@ -49,7 +49,7 @@ public abstract class AbstractDelegationTokenIdentifier private int masterKeyId = 0; public AbstractDelegationTokenIdentifier() { - this(new Text(), new Text(), new Text()); + this(null, null, null); } public AbstractDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index f329accec7553..2cf270b1426be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -41,7 +41,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index f61590c28ebce..6f65574b98386 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -65,8 +65,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * An implementation of {@link AbstractDelegationTokenSecretManager} that diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java index 4e9881bc34369..eb84c63bb319d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security.token.delegation.web; -import com.google.common.base.Preconditions; 
+import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.Text; @@ -295,10 +295,8 @@ public HttpURLConnection openConnection(URL url, Token token, String doAs) // delegation token Credentials creds = UserGroupInformation.getCurrentUser(). getCredentials(); - if (LOG.isDebugEnabled()) { - LOG.debug("Token not set, looking for delegation token. Creds:{}," - + " size:{}", creds.getAllTokens(), creds.numberOfTokens()); - } + LOG.debug("Token not set, looking for delegation token. Creds:{}," + + " size:{}", creds.getAllTokens(), creds.numberOfTokens()); if (!creds.getAllTokens().isEmpty()) { dToken = selectDelegationToken(url, creds); if (dToken != null) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java index 5275526202f2b..ae42c7c458ea2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security.token.delegation.web; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.curator.framework.CuratorFramework; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java index 284044fd938a8..479517f2a73ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java @@ -54,7 +54,7 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java index e1445fb5ca05b..7e7e794d9de47 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java @@ -33,7 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Delegation Token Manager used by the diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java index 0661fb2b5a2a7..865977e67d07a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java @@ -36,8 +36,8 @@ import org.apache.hadoop.security.authentication.server.HttpConstants; import org.apache.hadoop.security.authentication.server.MultiSchemeAuthenticationHandler; -import com.google.common.base.Preconditions; -import com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; /** * A {@link CompositeAuthenticationHandler} that supports multiple HTTP diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java index c9fec435bfa24..309bfa0eeb197 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java index 594fc5bfe8d14..ad92d4c6d7a24 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java index 17aa9639c31d9..bcb589f24885f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java @@ -20,7 +20,7 @@ import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import sun.misc.Signal; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java index 5e8a1f4eb21fb..903e6bac81026 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java @@ -25,8 +25,8 @@ import java.util.Arrays; import java.util.List; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java index 766fb0a6557eb..130414c2895b5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java @@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -179,7 +179,7 @@ public int run(String argv[]) throws Exception { servicePrincipal); } RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); InetSocketAddress address = NetUtils.createSocketAddr(hostPort); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); Class xface = TraceAdminProtocolPB.class; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AutoCloseableLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AutoCloseableLock.java index d7fe93d73cf02..e761858e3c170 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AutoCloseableLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AutoCloseableLock.java @@ -21,7 +21,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This is a wrap class of a ReentrantLock. 
Extending AutoCloseable diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java index d49013ec14d1f..d08e84f99de29 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java @@ -28,8 +28,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.util.concurrent.MoreExecutors; - import org.apache.hadoop.classification.InterfaceAudience; /** @@ -105,8 +103,7 @@ public Thread newThread(Runnable r) { private BlockingThreadPoolExecutorService(int permitCount, ThreadPoolExecutor eventProcessingExecutor) { - super(MoreExecutors.listeningDecorator(eventProcessingExecutor), - permitCount, false); + super(eventProcessingExecutor, permitCount, false); this.eventProcessingExecutor = eventProcessingExecutor; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java index 84ddc32f88c1f..ff7197ce52e4d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java @@ -23,10 +23,10 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Iterables; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Simplified List implementation which stores elements as a list diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java index 388a087bbd346..f81a429b5d422 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java @@ -21,7 +21,7 @@ import java.nio.channels.ClosedChannelException; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A closeable object that maintains a reference count. 
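The BlockingThreadPoolExecutorService constructor above stops wrapping its inner thread pool with Guava's MoreExecutors.listeningDecorator(); its superclass SemaphoredDelegatingExecutor, updated later in this patch, now extends the shaded ForwardingExecutorService and hands back plain java.util.concurrent.Future results. A minimal sketch, not part of this patch, of driving the updated executor with an ordinary JDK thread pool; the example class name is illustrative, and the executor is marked audience-private, so treat it as internal API.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    import org.apache.hadoop.util.SemaphoredDelegatingExecutor;

    public class SemaphoredExecutorExample {
      public static void main(String[] args) throws Exception {
        // Any plain ExecutorService is now an acceptable delegate; no
        // Guava ListeningExecutorService wrapping is needed any more.
        ExecutorService pool = Executors.newFixedThreadPool(4);
        SemaphoredDelegatingExecutor executor =
            new SemaphoredDelegatingExecutor(pool, 2, true);
        // submit() now returns a plain java.util.concurrent.Future.
        Future<String> result = executor.submit(() -> "done");
        System.out.println(result.get());
        pool.shutdown();
      }
    }

Callers that relied on the ListenableFuture previously returned by submit() will need to move to plain Future or CompletableFuture handling.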
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java index 510938b7fff95..9f4cc1921a353 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceStability; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java index 2ee53dc595f99..ee70256590bc4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java @@ -25,7 +25,7 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java index 7d04db23ca76c..1e5e9644beee7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.DiskChecker.DiskErrorException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java index 690d09755171f..846af7fb74372 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FindClass.java @@ -17,7 +17,7 @@ package org.apache.hadoop.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index 4247eb7050b5a..e8dc8cb447092 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java index 3ae4bbac6591d..81a22a46aa850 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The IdentityHashStore stores (key, value) mappings in an array. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java index cc0ebdf8b3e39..a5f4f0709dd73 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java index 8ab392ed041d0..8417246f0467c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java index 6842166930d5f..710861c761ae3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; /** diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java index 1ffb7db3febff..126aa6ab00884 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java index e043b1dc382c0..7a26ecadee456 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java @@ -35,7 +35,7 @@ import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java index 420ac8bc1851e..9c9953fb14c6d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java @@ -28,11 +28,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java index 79de1ac554476..01ddf373c085e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java @@ -24,8 +24,8 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A low 
memory footprint Cache which extends {@link LightWeightGSet}. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java index 7c7878a71bdf2..d2ea5cf350c66 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java @@ -27,7 +27,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A low memory footprint {@link GSet} implementation, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java index bd646e0bcb608..de95e98e1f4dd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java @@ -19,8 +19,8 @@ package org.apache.hadoop.util; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.io.FilterInputStream; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java index e2cd3048d5843..520ddf6bdf401 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java @@ -25,6 +25,9 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSupport; import org.apache.hadoop.io.Text; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; @@ -42,7 +45,7 @@ */ @InterfaceAudience.LimitedPrivate({"MapReduce"}) @InterfaceStability.Unstable -public class LineReader implements Closeable { +public class LineReader implements Closeable, IOStatisticsSource { private static final int DEFAULT_BUFFER_SIZE = 64 * 1024; private int bufferSize = DEFAULT_BUFFER_SIZE; private InputStream in; @@ -148,7 +151,16 @@ public LineReader(InputStream in, Configuration conf, public void close() throws IOException { in.close(); } - + + /** + * Return any IOStatistics provided by the source. + * @return IO stats from the input stream. + */ + @Override + public IOStatistics getIOStatistics() { + return IOStatisticsSupport.retrieveIOStatistics(in); + } + /** * Read one line from the InputStream into the given Text. 
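With the LineReader change above, the reader now implements IOStatisticsSource and simply forwards whatever statistics the wrapped input stream publishes, via IOStatisticsSupport.retrieveIOStatistics(in). A minimal sketch, not part of this patch, of probing a reader for statistics; the local file path and class name are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.statistics.IOStatistics;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.util.LineReader;

    public class LineReaderStatsExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        try (LineReader reader =
                 new LineReader(fs.open(new Path("/tmp/example.txt")), conf)) {
          Text line = new Text();
          while (reader.readLine(line) > 0) {
            // process each line here
          }
          // Returns null when the wrapped stream is not an IOStatisticsSource.
          IOStatistics stats = reader.getIOStatistics();
          if (stats != null) {
            System.out.println("stream statistics: " + stats);
          }
        }
      }
    }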
* diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java index 20931deb0e9bd..f87d059dec75b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java @@ -21,6 +21,7 @@ import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -28,8 +29,7 @@ import org.apache.commons.net.util.SubnetUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,9 +61,9 @@ public InetAddress getByName (String host) throws UnknownHostException { } private final boolean all; - private final Set ipAddresses; + private final Set inetAddresses; + private final Collection entries; private final List cidrAddresses; - private final Set hostNames; private final InetAddressFactory addressFactory; /** @@ -71,7 +71,11 @@ public InetAddress getByName (String host) throws UnknownHostException { * @param hostEntries comma separated ip/cidr/host addresses */ public MachineList(String hostEntries) { - this(StringUtils.getTrimmedStringCollection(hostEntries)); + this(hostEntries, InetAddressFactory.S_INSTANCE); + } + + public MachineList(String hostEntries, InetAddressFactory addressFactory) { + this(StringUtils.getTrimmedStringCollection(hostEntries), addressFactory); } /** @@ -88,19 +92,19 @@ public MachineList(Collection hostEntries) { * @param hostEntries * @param addressFactory addressFactory to convert host to InetAddress */ - public MachineList(Collection hostEntries, InetAddressFactory addressFactory) { + public MachineList(Collection hostEntries, + InetAddressFactory addressFactory) { this.addressFactory = addressFactory; if (hostEntries != null) { + entries = new ArrayList<>(hostEntries); if ((hostEntries.size() == 1) && (hostEntries.contains(WILDCARD_VALUE))) { - all = true; - ipAddresses = null; - hostNames = null; + all = true; + inetAddresses = null; cidrAddresses = null; } else { all = false; - Set ips = new HashSet(); + Set addrs = new HashSet<>(); List cidrs = new LinkedList(); - Set hosts = new HashSet(); for (String hostEntry : hostEntries) { //ip address range if (hostEntry.indexOf("/") > -1) { @@ -112,25 +116,29 @@ public MachineList(Collection hostEntries, InetAddressFactory addressFac LOG.warn("Invalid CIDR syntax : " + hostEntry); throw e; } - } else if (InetAddresses.isInetAddress(hostEntry)) { //ip address - ips.add(hostEntry); - } else { //hostname - hosts.add(hostEntry); + } else { + try { + addrs.add(addressFactory.getByName(hostEntry)); + } catch (UnknownHostException e) { + LOG.warn(e.toString()); + } } } - ipAddresses = (ips.size() > 0) ? ips : null; + inetAddresses = (addrs.size() > 0) ? addrs : null; cidrAddresses = (cidrs.size() > 0) ? cidrs : null; - hostNames = (hosts.size() > 0) ? 
hosts : null; } } else { - all = false; - ipAddresses = null; - hostNames = null; - cidrAddresses = null; + all = false; + inetAddresses = null; + cidrAddresses = null; + entries = Collections.emptyList(); } } /** - * Accepts an ip address and return true if ipAddress is in the list + * Accepts an ip address and return true if ipAddress is in the list. + * {@link #includes(InetAddress)} should be preferred + * to avoid possibly re-resolving the ip address. + * * @param ipAddress * @return true if ipAddress is part of the list */ @@ -144,71 +152,47 @@ public boolean includes(String ipAddress) { throw new IllegalArgumentException("ipAddress is null."); } - //check in the set of ipAddresses - if ((ipAddresses != null) && ipAddresses.contains(ipAddress)) { + try { + return includes(addressFactory.getByName(ipAddress)); + } catch (UnknownHostException e) { + return false; + } + } + + /** + * Accepts an inet address and return true if address is in the list. + * @param address + * @return true if address is part of the list + */ + public boolean includes(InetAddress address) { + if (all) { return true; } - - //iterate through the ip ranges for inclusion + if (address == null) { + throw new IllegalArgumentException("address is null."); + } + if (inetAddresses != null && inetAddresses.contains(address)) { + return true; + } + // iterate through the ip ranges for inclusion if (cidrAddresses != null) { + String ipAddress = address.getHostAddress(); for(SubnetUtils.SubnetInfo cidrAddress : cidrAddresses) { if(cidrAddress.isInRange(ipAddress)) { return true; } } } - - //check if the ipAddress matches one of hostnames - if (hostNames != null) { - //convert given ipAddress to hostname and look for a match - InetAddress hostAddr; - try { - hostAddr = addressFactory.getByName(ipAddress); - if ((hostAddr != null) && hostNames.contains(hostAddr.getCanonicalHostName())) { - return true; - } - } catch (UnknownHostException e) { - //ignore the exception and proceed to resolve the list of hosts - } - - //loop through host addresses and convert them to ip and look for a match - for (String host : hostNames) { - try { - hostAddr = addressFactory.getByName(host); - } catch (UnknownHostException e) { - continue; - } - if (hostAddr.getHostAddress().equals(ipAddress)) { - return true; - } - } - } return false; } - /** - * returns the contents of the MachineList as a Collection<String> - * This can be used for testing - * @return contents of the MachineList + * returns the contents of the MachineList as a Collection<String> . + * This can be used for testing . + * + * @return contents of the MachineList. 
*/ @VisibleForTesting public Collection getCollection() { - Collection list = new ArrayList(); - if (all) { - list.add("*"); - } else { - if (ipAddresses != null) { - list.addAll(ipAddresses); - } - if (hostNames != null) { - list.addAll(hostNames); - } - if (cidrAddresses != null) { - for(SubnetUtils.SubnetInfo cidrAddress : cidrAddresses) { - list.add(cidrAddress.getCidrSignature()); - } - } - } - return list; + return entries; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java index a8a380ed070d1..11d1176f92a59 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java @@ -74,11 +74,6 @@ public static boolean isNativeCodeLoaded() { return nativeCodeLoaded; } - /** - * Returns true only if this build was compiled with support for snappy. - */ - public static native boolean buildSupportsSnappy(); - /** * Returns true only if this build was compiled with support for ISA-L. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java index 3142df2da36e0..cc41f02d87e48 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.ChecksumException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Wrapper around JNI support code to do checksum computation diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java index 23388248575ac..3847902e79743 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java @@ -22,8 +22,6 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeNative; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.OpensslCipher; -import org.apache.hadoop.io.compress.Lz4Codec; -import org.apache.hadoop.io.compress.SnappyCodec; import org.apache.hadoop.io.compress.bzip2.Bzip2Factory; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -67,12 +65,9 @@ public static void main(String[] args) { Configuration conf = new Configuration(); boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded(); boolean zlibLoaded = false; - boolean snappyLoaded = false; boolean isalLoaded = false; boolean zStdLoaded = false; boolean pmdkLoaded = false; - // lz4 is linked within libhadoop - boolean lz4Loaded = nativeHadoopLoaded; boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf); boolean openSslLoaded = false; boolean winutilsExists = false; @@ -80,11 +75,9 @@ public static void main(String[] args) { String openSslDetail = ""; String hadoopLibraryName = ""; String zlibLibraryName = ""; - String snappyLibraryName = ""; String isalDetail = ""; String pmdkDetail = ""; String 
zstdLibraryName = ""; - String lz4LibraryName = ""; String bzip2LibraryName = ""; String winutilsPath = null; @@ -99,11 +92,6 @@ public static void main(String[] args) { if (zStdLoaded && NativeCodeLoader.buildSupportsZstd()) { zstdLibraryName = ZStandardCodec.getLibraryName(); } - snappyLoaded = NativeCodeLoader.buildSupportsSnappy() && - SnappyCodec.isNativeCodeLoaded(); - if (snappyLoaded && NativeCodeLoader.buildSupportsSnappy()) { - snappyLibraryName = SnappyCodec.getLibraryName(); - } isalDetail = ErasureCodeNative.getLoadingFailureReason(); if (isalDetail != null) { @@ -127,9 +115,6 @@ public static void main(String[] args) { openSslLoaded = true; } - if (lz4Loaded) { - lz4LibraryName = Lz4Codec.getLibraryName(); - } if (bzip2Loaded) { bzip2LibraryName = Bzip2Factory.getLibraryName(conf); } @@ -152,8 +137,6 @@ public static void main(String[] args) { System.out.printf("hadoop: %b %s%n", nativeHadoopLoaded, hadoopLibraryName); System.out.printf("zlib: %b %s%n", zlibLoaded, zlibLibraryName); System.out.printf("zstd : %b %s%n", zStdLoaded, zstdLibraryName); - System.out.printf("snappy: %b %s%n", snappyLoaded, snappyLibraryName); - System.out.printf("lz4: %b %s%n", lz4Loaded, lz4LibraryName); System.out.printf("bzip2: %b %s%n", bzip2Loaded, bzip2LibraryName); System.out.printf("openssl: %b %s%n", openSslLoaded, openSslDetail); System.out.printf("ISA-L: %b %s%n", isalLoaded, isalDetail); @@ -164,8 +147,8 @@ public static void main(String[] args) { } if ((!nativeHadoopLoaded) || (Shell.WINDOWS && (!winutilsExists)) || - (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded - && bzip2Loaded && isalLoaded && zStdLoaded))) { + (checkAll && !(zlibLoaded && bzip2Loaded + && isalLoaded && zStdLoaded))) { // return 1 to indicated check failed ExitUtil.terminate(1); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/OperationDuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/OperationDuration.java index 3276d2138bbfc..fdd25286a2300 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/OperationDuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/OperationDuration.java @@ -18,48 +18,98 @@ package org.apache.hadoop.util; +import java.time.Duration; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Little duration counter. */ -@InterfaceAudience.Private +@InterfaceAudience.Public @InterfaceStability.Unstable public class OperationDuration { + /** + * Time in millis when the operation started. + */ private final long started; + + /** + * Time when the operation finished. + */ private long finished; + /** + * Instantiate. + * The start time and finished time are both set + * to the current clock time. + */ public OperationDuration() { started = time(); finished = started; } + /** + * Evaluate the system time. + * @return the current clock time. + */ protected long time() { return System.currentTimeMillis(); } + /** + * Update the finished time with the current system time. + */ public void finished() { finished = time(); } + /** + * Return the duration as {@link #humanTime(long)}. + * @return a printable duration. + */ public String getDurationString() { return humanTime(value()); } + /** + * Convert to a human time of minutes:seconds.millis. + * @param time time to humanize. + * @return a printable value. 
+ */ public static String humanTime(long time) { long seconds = (time / 1000); long minutes = (seconds / 60); return String.format("%d:%02d.%03ds", minutes, seconds % 60, time % 1000); } + /** + * Return the duration as {@link #humanTime(long)}. + * @return a printable duration. + */ @Override public String toString() { return getDurationString(); } + /** + * Get the duration in milliseconds. + *
    + * This will be 0 until a call + * to {@link #finished()} has been made. + * @return the currently recorded duration. + */ public long value() { return finished -started; } + + /** + * Get the duration of an operation as a java Duration + * instance. + * @return a duration. + */ + public Duration asDuration() { + return Duration.ofMillis(value()); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java index 620186298de66..83cc6dcd9b576 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsSystem; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java index 4ec77e75ba520..45b9a98c68a7e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java @@ -18,10 +18,8 @@ package org.apache.hadoop.util; -import com.google.common.util.concurrent.ForwardingListeningExecutorService; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ForwardingExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.classification.InterfaceAudience; @@ -29,6 +27,7 @@ import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -49,10 +48,10 @@ @SuppressWarnings("NullableProblems") @InterfaceAudience.Private public class SemaphoredDelegatingExecutor extends - ForwardingListeningExecutorService { + ForwardingExecutorService { private final Semaphore queueingPermits; - private final ListeningExecutorService executorDelegatee; + private final ExecutorService executorDelegatee; private final int permitCount; /** @@ -62,7 +61,7 @@ public class SemaphoredDelegatingExecutor extends * @param fair should the semaphore be "fair" */ public SemaphoredDelegatingExecutor( - ListeningExecutorService executorDelegatee, + ExecutorService executorDelegatee, int permitCount, boolean fair) { this.permitCount = permitCount; @@ -71,7 +70,7 @@ public SemaphoredDelegatingExecutor( } @Override - protected ListeningExecutorService delegate() { + protected ExecutorService delegate() { return executorDelegatee; } @@ -102,7 +101,7 @@ public T invokeAny(Collection> tasks, long timeout, } @Override - public ListenableFuture submit(Callable task) { + 
public Future submit(Callable task) { try { queueingPermits.acquire(); } catch (InterruptedException e) { @@ -113,7 +112,7 @@ public ListenableFuture submit(Callable task) { } @Override - public ListenableFuture submit(Runnable task, T result) { + public Future submit(Runnable task, T result) { try { queueingPermits.acquire(); } catch (InterruptedException e) { @@ -124,7 +123,7 @@ public ListenableFuture submit(Runnable task, T result) { } @Override - public ListenableFuture submit(Runnable task) { + public Future submit(Runnable task) { try { queueingPermits.acquire(); } catch (InterruptedException e) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java index 9ba9e94dff5aa..5096b10951d38 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @InterfaceAudience.Private @InterfaceStability.Unstable diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index e66c81b4b8df6..8625b214fde52 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -34,7 +34,7 @@ import java.util.WeakHashMap; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java index 76d90063609b2..f044295a8068d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.util; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -147,14 +147,14 @@ private static void shutdownExecutor(final Configuration conf) { shutdownTimeout, TIME_UNIT_DEFAULT)) { // timeout waiting for the - LOG.error("ShutdownHookManger shutdown forcefully after" + LOG.error("ShutdownHookManager shutdown forcefully after" + " {} seconds.", shutdownTimeout); EXECUTOR.shutdownNow(); } - LOG.debug("ShutdownHookManger completed shutdown."); + LOG.debug("ShutdownHookManager completed 
shutdown."); } catch (InterruptedException ex) { // interrupted. - LOG.error("ShutdownHookManger interrupted while waiting for " + + LOG.error("ShutdownHookManager interrupted while waiting for " + "termination.", ex); EXECUTOR.shutdownNow(); Thread.currentThread().interrupt(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java index 50a728e568a4e..296727e67b44f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java index 028e49acbe59a..4a30ee2800aa9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java @@ -21,8 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.collect.Interner; -import com.google.common.collect.Interners; +import org.apache.hadoop.thirdparty.com.google.common.collect.Interner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Interners; /** * Provides string interning utility methods. 
For weak interning, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index cf7b04ab61a7e..9e7b36f71e211 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -42,8 +42,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.net.NetUtils; -import com.google.common.base.Preconditions; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; /** * General string utils diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java index 3591bb81e121c..6f2f585c87f22 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java @@ -30,7 +30,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java index e8940141b43c5..4d86153345bae 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java index 48cef5f06fcc9..8e4e67d1b61e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java @@ -27,10 +27,10 @@ import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; -import com.google.common.base.Charsets; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; /** * Utilities for working with ZooKeeper. 
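The import rewrites in this and the neighbouring files all follow the same pattern used throughout the patch: Guava classes are consumed from the shaded hadoop-thirdparty prefix org.apache.hadoop.thirdparty.com.google.common instead of com.google.common, while the Guava APIs themselves are unchanged. A minimal sketch, not part of this patch, of what the relocation looks like in downstream code; the example class name is illustrative.

    // Before: direct dependency on Guava's own package namespace.
    // import com.google.common.base.Preconditions;

    // After: the relocated (shaded) copy from hadoop-thirdparty.
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

    public class ShadedGuavaExample {
      public static void main(String[] args) {
        // Same Guava API, only the package prefix differs.
        String value = Preconditions.checkNotNull("configured-value",
            "value must not be null");
        System.out.println(value);
      }
    }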
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java index 61eb777d4806d..46fc8df37d3f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.util.concurrent; -import com.google.common.util.concurrent.AbstractFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AbstractFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java index 86142fb6d3a4f..e125dbfbd0abb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ChildReaper.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.util.curator; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.curator.framework.recipes.locks.Reaper; import org.apache.curator.utils.CloseableUtils; import org.apache.curator.framework.CuratorFramework; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java index 36dade27dd6f7..3e3939515bc1d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Helper class that provides utility methods specific to ZK operations. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/BiFunctionRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/BiFunctionRaisingIOE.java new file mode 100644 index 0000000000000..ea17c16d01e87 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/BiFunctionRaisingIOE.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import java.io.IOException; + +/** + * Function of arity 2 which may raise an IOException. + * @param type of arg1 + * @param type of arg2 + * @param type of return value. + */ +@FunctionalInterface +public interface BiFunctionRaisingIOE { + + /** + * Apply the function. + * @param t argument 1 + * @param u argument 2 + * @return result + * @throws IOException Any IO failure + */ + R apply(T t, U u) throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CallableRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CallableRaisingIOE.java new file mode 100644 index 0000000000000..65b3a63b2b9a0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/CallableRaisingIOE.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import java.io.IOException; + +/** + * This is a callable which only raises an IOException. + * @param return type + */ +@FunctionalInterface +public interface CallableRaisingIOE { + + /** + * Apply the operation. + * @return result + * @throws IOException Any IO failure + */ + R apply() throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/ConsumerRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/ConsumerRaisingIOE.java new file mode 100644 index 0000000000000..24a3b55c58d4a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/ConsumerRaisingIOE.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.util.functional; + +import java.io.IOException; + +/** + * Version of java.util.function.Consumer which raises + * exceptions. + * @param type of argument,. + */ +@FunctionalInterface +public interface ConsumerRaisingIOE { + + /** + * Process the argument. + * @param t type + * @throws IOException if needed + */ + void accept(T t) throws IOException; + + /** + * after calling {@link #accept(Object)}, + * invoke the next consumer in the chain. + * @param next next consumer + * @return the chain. + */ + default ConsumerRaisingIOE andThen( + ConsumerRaisingIOE next) { + return (T t) -> { + accept(t); + next.accept(t); + }; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FunctionRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FunctionRaisingIOE.java new file mode 100644 index 0000000000000..83e041e2b3160 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FunctionRaisingIOE.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import java.io.IOException; + +/** + * Function of arity 1 which may raise an IOException. + * @param type of arg1 + * @param type of return value. + */ +@FunctionalInterface +public interface FunctionRaisingIOE { + + /** + * Apply the function. + * @param t argument 1 + * @return result + * @throws IOException Any IO failure + */ + R apply(T t) throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FutureIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FutureIO.java new file mode 100644 index 0000000000000..3f7218baa759f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/FutureIO.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.util.functional; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.io.UncheckedIOException; +import java.util.concurrent.CompletionException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Future IO Helper methods. + *
    + * Contains methods promoted from + * {@link org.apache.hadoop.fs.impl.FutureIOSupport} because they + * are a key part of integrating async IO in application code. + *
    + * One key feature is that the {@link #awaitFuture(Future)} and + * {@link #awaitFuture(Future, long, TimeUnit)} calls will + * extract and rethrow exceptions raised in the future's execution, + * including extracting the inner IOException of any + * {@code UncheckedIOException} raised in the future. + * This makes it somewhat easier to execute IOException-raising + * code inside futures. + *

    + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class FutureIO { + + private FutureIO() { + } + + /** + * Given a future, evaluate it. + *

    + * Any exception generated in the future is + * extracted and rethrown. + *

    + * @param future future to evaluate + * @param type of the result. + * @return the result, if all went well. + * @throws InterruptedIOException future was interrupted + * @throws IOException if something went wrong + * @throws RuntimeException any nested RTE thrown + */ + public static T awaitFuture(final Future future) + throws InterruptedIOException, IOException, RuntimeException { + try { + return future.get(); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException(e.toString()) + .initCause(e); + } catch (ExecutionException e) { + return raiseInnerCause(e); + } + } + + /** + * Given a future, evaluate it. + *

    + * Any exception generated in the future is + * extracted and rethrown. + *
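A sketch of the timed overload; the 30-second deadline and the helper name are illustrative, everything else is declared in this file.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.util.functional.FutureIO;

public class AwaitWithTimeout {
  static FSDataInputStream openWithDeadline(
      CompletableFuture<FSDataInputStream> future)
      throws IOException, TimeoutException {
    // Waits at most 30 seconds; a TimeoutException is raised on timeout,
    // while any IOException raised inside the future is unwrapped and rethrown.
    return FutureIO.awaitFuture(future, 30, TimeUnit.SECONDS);
  }
}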

    + * @param future future to evaluate + * @param type of the result. + * @return the result, if all went well. + * @throws InterruptedIOException future was interrupted + * @throws IOException if something went wrong + * @throws RuntimeException any nested RTE thrown + * @throws TimeoutException the future timed out. + */ + public static T awaitFuture(final Future future, + final long timeout, + final TimeUnit unit) + throws InterruptedIOException, IOException, RuntimeException, + TimeoutException { + try { + return future.get(timeout, unit); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException(e.toString()) + .initCause(e); + } catch (ExecutionException e) { + return raiseInnerCause(e); + } + } + + /** + * From the inner cause of an execution exception, extract the inner cause + * if it is an IOE or RTE. + * This will always raise an exception, either the inner IOException, + * an inner RuntimeException, or a new IOException wrapping the raised + * exception. + * + * @param e exception. + * @param type of return value. + * @return nothing, ever. + * @throws IOException either the inner IOException, or a wrapper around + * any non-Runtime-Exception + * @throws RuntimeException if that is the inner cause. + */ + public static T raiseInnerCause(final ExecutionException e) + throws IOException { + throw unwrapInnerException(e); + } + + /** + * Extract the cause of a completion failure and rethrow it if an IOE + * or RTE. + * @param e exception. + * @param type of return value. + * @return nothing, ever. + * @throws IOException either the inner IOException, or a wrapper around + * any non-Runtime-Exception + * @throws RuntimeException if that is the inner cause. + */ + public static T raiseInnerCause(final CompletionException e) + throws IOException { + throw unwrapInnerException(e); + } + + /** + * From the inner cause of an execution exception, extract the inner cause + * to an IOException, raising RuntimeExceptions and Errors immediately. + *
 + * <ol>
 + *   <li>If it is an IOE: return.</li>
 + *   <li>If it is a {@link UncheckedIOException}: return the cause.</li>
 + *   <li>Completion/Execution Exceptions: extract and repeat.</li>
 + *   <li>If it is an RTE or Error: throw.</li>
 + *   <li>Any other type: wrap in an IOE.</li>
 + * </ol>
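A small worked example of those rules (illustrative only; the class name and exception message are invented): an ExecutionException wrapping an UncheckedIOException is reduced to the innermost IOException.

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ExecutionException;

import org.apache.hadoop.util.functional.FutureIO;

public class UnwrapExample {
  public static void main(String[] args) {
    // How a future's failure typically surfaces: an ExecutionException
    // whose cause is an UncheckedIOException wrapping the real IOException.
    ExecutionException failure = new ExecutionException(
        new UncheckedIOException(new FileNotFoundException("missing")));
    IOException extracted = FutureIO.unwrapInnerException(failure);
    // The innermost IOException is returned, not a wrapper.
    System.out.println(extracted instanceof FileNotFoundException);  // true
  }
}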
    + * + * Recursively handles wrapped Execution and Completion Exceptions in + * case something very complicated has happened. + * @param e exception. + * @return an IOException extracted or built from the cause. + * @throws RuntimeException if that is the inner cause. + * @throws Error if that is the inner cause. + */ + @SuppressWarnings("ChainOfInstanceofChecks") + public static IOException unwrapInnerException(final Throwable e) { + Throwable cause = e.getCause(); + if (cause instanceof IOException) { + return (IOException) cause; + } else if (cause instanceof UncheckedIOException) { + // this is always an IOException + return ((UncheckedIOException) cause).getCause(); + } else if (cause instanceof CompletionException) { + return unwrapInnerException(cause); + } else if (cause instanceof ExecutionException) { + return unwrapInnerException(cause); + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else if (cause instanceof Error) { + throw (Error) cause; + } else if (cause != null) { + // other type: wrap with a new IOE + return new IOException(cause); + } else { + // this only happens if there was no cause. + return new IOException(e); + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/InvocationRaisingIOE.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/InvocationRaisingIOE.java new file mode 100644 index 0000000000000..b59dabea89ea9 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/InvocationRaisingIOE.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import java.io.IOException; + +/** + * This is a lambda-expression which may raises an IOException. + * This is a recurrent design patten in the hadoop codebase, e.g + * {@code LambdaTestUtils.VoidCallable} and + * the S3A {@code Invoker.VoidOperation}}. Hopefully this should + * be the last. + * Note for implementors of methods which take this as an argument: + * don't use method overloading to determine which specific functional + * interface is to be used. + */ +@FunctionalInterface +public interface InvocationRaisingIOE { + + /** + * Apply the operation. 
+ * @throws IOException Any IO failure + */ + void apply() throws IOException; + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java new file mode 100644 index 0000000000000..3ac0fced1493d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/RemoteIterators.java @@ -0,0 +1,698 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import javax.annotation.Nullable; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Objects; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.io.IOUtils; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.logIOStatisticsAtDebug; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; + +/** + * A set of remote iterators supporting transformation and filtering, + * with IOStatisticsSource passthrough, and of conversions of + * the iterators to lists/arrays and of performing actions + * on the values. + *

    + * This aims to make it straightforward to use lambda-expressions to + * transform the results of an iterator, without losing the statistics + * in the process, and to chain the operations together. + *
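For example (a sketch; the FileSystem and directory come from the caller, and the helper name is illustrative), a directory listing can be filtered to plain files and mapped to paths without losing any statistics carried by the underlying iterator:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.util.functional.RemoteIterators;

public class ListingExample {
  static List<Path> filePaths(FileSystem fs, Path dir) throws IOException {
    // Incremental listing -> keep only files -> map each status to its path.
    RemoteIterator<FileStatus> statuses = fs.listStatusIterator(dir);
    RemoteIterator<FileStatus> files =
        RemoteIterators.filteringRemoteIterator(statuses, FileStatus::isFile);
    RemoteIterator<Path> paths =
        RemoteIterators.mappingRemoteIterator(files, FileStatus::getPath);
    // Materialize; IOStatistics from the source pass through the whole chain.
    return RemoteIterators.toList(paths);
  }
}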

    + * The closeable operation will be passed through RemoteIterators which + * wrap other RemoteIterators. This is to support any iterator which + * can be closed to release held connections, file handles etc. + * Unless client code is written to assume that RemoteIterator instances + * may be closed, this is not likely to be broadly used. It is added + * to make it possible to adopt this feature in a managed way. + *
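A sketch of attaching an extra resource to a listing; the Closeable "connection" is a hypothetical stand-in for whatever backs the iterator (a network connection, an open file, ...):

import java.io.Closeable;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.util.functional.RemoteIterators;

public class ClosingExample {
  static <T> RemoteIterator<T> withCleanup(
      RemoteIterator<T> listing, Closeable connection) {
    // When the returned iterator is closed (explicitly, or by foreach()
    // once iteration completes), the wrapped iterator's close() -- if it
    // has one -- and then the extra Closeable are both invoked.
    return RemoteIterators.closingRemoteIterator(listing, connection);
  }
}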

 + * One notable feature is that the + * {@link #foreach(RemoteIterator, ConsumerRaisingIOE)} method will + * LOG at debug any IOStatistics provided by the iterator, if such + * statistics are provided. There's no attempt at retrieval and logging + * if the LOG is not set to debug, so it is a zero-cost feature unless + * the logger {@code org.apache.hadoop.util.functional.RemoteIterators} + * is at DEBUG. + *
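A sketch of foreach() in use (helper name illustrative): the return value is the number of entries processed, and the iterator's IOStatistics are only retrieved and logged when the logger named above is at DEBUG.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.functional.RemoteIterators;

public class ForeachExample {
  static long printListing(FileSystem fs, Path dir) throws IOException {
    // Applies the consumer to every entry, closes the iterator if it is
    // Closeable, and returns the number of entries seen.
    return RemoteIterators.foreach(
        fs.listStatusIterator(dir),
        status -> System.out.println(status.getPath()));
  }
}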

    + * Based on the S3A Listing code, and some some work on moving other code + * to using iterative listings so as to pick up the statistics. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public final class RemoteIterators { + + /** + * Log used for logging any statistics in + * {@link #foreach(RemoteIterator, ConsumerRaisingIOE)} + * at DEBUG. + */ + private static final Logger LOG = LoggerFactory.getLogger( + RemoteIterators.class); + + private RemoteIterators() { + } + + /** + * Create an iterator from a singleton. + * @param singleton instance + * @param type + * @return a remote iterator + */ + public static RemoteIterator remoteIteratorFromSingleton( + @Nullable T singleton) { + return new SingletonIterator<>(singleton); + } + + /** + * Create a remote iterator from a java.util.Iterator. + * @param type + * @return a remote iterator + */ + public static RemoteIterator remoteIteratorFromIterator( + Iterator iterator) { + return new WrappedJavaIterator<>(iterator); + } + + /** + * Create a remote iterator from a java.util.Iterable -e.g. a list + * or other collection. + * @param type + * @return a remote iterator + */ + public static RemoteIterator remoteIteratorFromIterable( + Iterable iterable) { + return new WrappedJavaIterator<>(iterable.iterator()); + } + + /** + * Create a remote iterator from an array. + * @param type + * @return a remote iterator + */ + public static RemoteIterator remoteIteratorFromArray(T[] array) { + return new WrappedJavaIterator<>(Arrays.stream(array).iterator()); + } + + /** + * Create an iterator from an iterator and a transformation function. + * @param source type + * @param result type + * @param iterator source + * @param mapper transformation + * @return a remote iterator + */ + public static RemoteIterator mappingRemoteIterator( + RemoteIterator iterator, + FunctionRaisingIOE mapper) { + return new MappingRemoteIterator<>(iterator, mapper); + } + + /** + * Create a RemoteIterator from a RemoteIterator, casting the + * type in the process. This is to help with filesystem API + * calls where overloading causes confusion (e.g. listStatusIterator()) + * @param source type + * @param result type + * @param iterator source + * @return a remote iterator + */ + public static RemoteIterator typeCastingRemoteIterator( + RemoteIterator iterator) { + return new TypeCastingRemoteIterator<>(iterator); + } + + /** + * Create a RemoteIterator from a RemoteIterator and a filter + * function which returns true for every element to be passed + * through. + *

    + * Elements are filtered in the hasNext() method; if not used + * the filtering will be done on demand in the {@code next()} + * call. + * @param type + * @param iterator source + * @param filter filter + * @return a remote iterator + */ + public static RemoteIterator filteringRemoteIterator( + RemoteIterator iterator, + FunctionRaisingIOE filter) { + return new FilteringRemoteIterator<>(iterator, filter); + } + + /** + * This adds an extra close operation alongside the passthrough + * to any Closeable.close() method supported by the source iterator. + * @param iterator source + * @param toClose extra object to close. + * @param source type. + * @return a new iterator + */ + public static RemoteIterator closingRemoteIterator( + RemoteIterator iterator, + Closeable toClose) { + return new CloseRemoteIterator<>(iterator, toClose); + } + + /** + * Build a list from a RemoteIterator. + * @param type + * @return a list of the values. + * @throws IOException if the source RemoteIterator raises it. + */ + public static List toList(RemoteIterator source) + throws IOException { + List l = new ArrayList<>(); + foreach(source, l::add); + return l; + } + + /** + * Build an array from a RemoteIterator. + * @param type + * @return an array of the values. + * @throws IOException if the source RemoteIterator raises it. + */ + public static T[] toArray(RemoteIterator source) throws IOException { + return (T[]) toList(source).toArray(); + } + + /** + * Apply an operation to all values of a RemoteIterator. + *

 + * If the iterator is an IOStatisticsSource returning a non-null + * set of statistics, and this class's log is set to DEBUG, + * then the statistics of the operation are evaluated and logged at + * debug. + *

    + * The number of entries processed is returned, as it is useful to + * know this, especially during tests or when reporting values + * to users. + *

    + * This does not close the iterator afterwards. + * @param source iterator source + * @param consumer consumer of the values. + * @return the number of elements processed + * @param type of source + * @throws IOException if the source RemoteIterator or the consumer raise one. + */ + public static long foreach( + RemoteIterator source, + ConsumerRaisingIOE consumer) throws IOException { + long count = 0; + + try { + while (source.hasNext()) { + count++; + consumer.accept(source.next()); + } + + // maybe log the results + logIOStatisticsAtDebug(LOG, "RemoteIterator Statistics: {}", source); + } finally { + if (source instanceof Closeable) { + // source is closeable, so close. + IOUtils.cleanupWithLogger(LOG, (Closeable) source); + } + } + + return count; + } + + /** + * A remote iterator from a singleton. It has a single next() + * value, after which hasNext() returns false and next() fails. + *

    + * If it is a source of + * remote statistics, these are returned. + * @param type. + */ + private static final class SingletonIterator + implements RemoteIterator, IOStatisticsSource { + + /** + * Single entry. + */ + private final T singleton; + + /** Has the entry been processed? */ + private boolean processed; + + /** + * Instantiate. + * @param singleton single value...may be null + */ + private SingletonIterator(@Nullable T singleton) { + this.singleton = singleton; + // if the entry is null, consider it processed. + this.processed = singleton == null; + } + + @Override + public boolean hasNext() throws IOException { + return !processed; + } + + @SuppressWarnings("NewExceptionWithoutArguments") + @Override + public T next() throws IOException { + if (hasNext()) { + processed = true; + return singleton; + } else { + throw new NoSuchElementException(); + } + } + + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(singleton); + } + + @Override + public String toString() { + return "SingletonIterator{" + + (singleton != null ? singleton : "") + + '}'; + } + + } + + /** + * Create a remote iterator from a simple java.util.Iterator, or + * an iterable. + *

 + * If the iterator is a source of statistics, these are passed through. + *

    + * The {@link #close()} will close the source iterator if it is + * Closeable; + * @param iterator type. + */ + private static final class WrappedJavaIterator + implements RemoteIterator, IOStatisticsSource, Closeable { + + /** + * inner iterator.. + */ + private final Iterator source; + + private final Closeable sourceToClose; + + + /** + * Construct from an interator. + * @param source source iterator. + */ + private WrappedJavaIterator(Iterator source) { + this.source = requireNonNull(source); + sourceToClose = new MaybeClose(source); + } + + @Override + public boolean hasNext() { + return source.hasNext(); + } + + @Override + public T next() { + return source.next(); + } + + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(source); + } + + @Override + public String toString() { + return "FromIterator{" + source + '}'; + } + + @Override + public void close() throws IOException { + sourceToClose.close(); + + } + } + + /** + * Wrapper of another remote iterator; IOStatistics + * and Closeable methods are passed down if implemented. + * @param source type + * @param type of returned value + */ + private static abstract class WrappingRemoteIterator + implements RemoteIterator, IOStatisticsSource, Closeable { + + /** + * Source iterator. + */ + private final RemoteIterator source; + + private final Closeable sourceToClose; + + protected WrappingRemoteIterator(final RemoteIterator source) { + this.source = requireNonNull(source); + sourceToClose = new MaybeClose(source); + } + + protected RemoteIterator getSource() { + return source; + } + + @Override + public IOStatistics getIOStatistics() { + return retrieveIOStatistics(source); + } + + @Override + public void close() throws IOException { + sourceToClose.close(); + } + + /** + * Check for the source having a next element. + * If it does not, this object's close() method + * is called and false returned + * @return true if there is a new value + * @throws IOException failure to retrieve next value + */ + protected boolean sourceHasNext() throws IOException { + boolean hasNext; + try { + hasNext = getSource().hasNext(); + } catch (IOException e) { + IOUtils.cleanupWithLogger(LOG, this); + throw e; + } + if (!hasNext) { + // there is nothing less so automatically close. + close(); + } + return hasNext; + } + + /** + * Get the next source value. + * This calls {@link #sourceHasNext()} first to verify + * that there is data. + * @return the next value + * @throws IOException failure + * @throws NoSuchElementException no more data + */ + protected S sourceNext() throws IOException { + try { + if (!sourceHasNext()) { + throw new NoSuchElementException(); + } + return getSource().next(); + } catch (NoSuchElementException | IOException e) { + IOUtils.cleanupWithLogger(LOG, this); + throw e; + } + } + + @Override + public String toString() { + return source.toString(); + } + + } + + /** + * Iterator taking a source and a transformational function. + * @param source type + * @param final output type.There + */ + private static final class MappingRemoteIterator + extends WrappingRemoteIterator { + + /** + * Mapper to invoke. 
+ */ + private final FunctionRaisingIOE mapper; + + private MappingRemoteIterator( + RemoteIterator source, + FunctionRaisingIOE mapper) { + super(source); + this.mapper = requireNonNull(mapper); + } + + @Override + public boolean hasNext() throws IOException { + return sourceHasNext(); + } + + @Override + public T next() throws IOException { + return mapper.apply(sourceNext()); + } + + @Override + public String toString() { + return "FunctionRemoteIterator{" + getSource() + '}'; + } + } + + /** + * RemoteIterator which can change the type of the input. + * This is useful in some situations. + * @param source type + * @param final output type. + */ + private static final class TypeCastingRemoteIterator + extends WrappingRemoteIterator { + + private TypeCastingRemoteIterator( + RemoteIterator source) { + super(source); + } + + @Override + public boolean hasNext() throws IOException { + return sourceHasNext(); + } + + @Override + public T next() throws IOException { + return (T)sourceNext(); + } + + @Override + public String toString() { + return getSource().toString(); + } + } + + /** + * Extend the wrapped iterator by filtering source values out. + * Only those values for which the filter predicate returns true + * will be returned. + * @param type of iterator. + */ + @SuppressWarnings("NewExceptionWithoutArguments") + private static final class FilteringRemoteIterator + extends WrappingRemoteIterator { + + /** + * Filter Predicate. + * Takes the input type or any superclass. + */ + private final FunctionRaisingIOE + filter; + + /** + * Next value; will be null if none has been evaluated, or the + * last one was already returned by next(). + */ + private S next; + + /** + * An iterator which combines filtering with transformation. + * All source elements for which filter = true are returned, + * transformed via the mapper. + * @param source source iterator. + * @param filter filter predicate. + */ + private FilteringRemoteIterator( + RemoteIterator source, + FunctionRaisingIOE filter) { + super(source); + + this.filter = requireNonNull(filter); + } + + /** + * Fetch: retrieve the next value. + * @return true if a new value was found after filtering. + * @throws IOException failure in retrieval from source or mapping + */ + private boolean fetch() throws IOException { + while (next == null && sourceHasNext()) { + S candidate = getSource().next(); + if (filter.apply(candidate)) { + next = candidate; + return true; + } + } + return false; + } + + /** + * Trigger a fetch if an entry is needed. + * @return true if there was already an entry return, + * or there was not but one could then be retrieved.set + * @throws IOException failure in fetch operation + */ + @Override + public boolean hasNext() throws IOException { + if (next != null) { + return true; + } + return fetch(); + } + + /** + * Return the next value. + * Will retrieve the next elements if needed. + * This is where the mapper takes place. + * @return true if there is another data element. + * @throws IOException failure in fetch operation or the transformation. + * @throws NoSuchElementException no more data + */ + @Override + public S next() throws IOException { + if (hasNext()) { + S result = next; + next = null; + return result; + } + throw new NoSuchElementException(); + } + + @Override + public String toString() { + return "FilteringRemoteIterator{" + getSource() + '}'; + } + } + + /** + * A wrapping remote iterator which adds another entry to + * close. This is to assist cleanup. 
+ * @param type + */ + private static final class CloseRemoteIterator + extends WrappingRemoteIterator { + + private final MaybeClose toClose; + private boolean closed; + + private CloseRemoteIterator( + final RemoteIterator source, + final Closeable toClose) { + super(source); + this.toClose = new MaybeClose(Objects.requireNonNull(toClose)); + } + + @Override + public boolean hasNext() throws IOException { + return sourceHasNext(); + } + + @Override + public S next() throws IOException { + + return sourceNext(); + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + LOG.debug("Closing {}", this); + try { + super.close(); + } finally { + toClose.close(); + } + } + } + + /** + * Class to help with Closeable logic, where sources may/may not + * be closeable, only one invocation is allowed. + * On the second and later call of close(), it is a no-op. + */ + private static final class MaybeClose implements Closeable { + + private Closeable toClose; + + /** + * Construct. + * @param o object to close. + */ + private MaybeClose(Object o) { + this(o, true); + } + + /** + * Construct -close the object if it is closeable and close==true. + * @param o object to close. + * @param close should close? + */ + private MaybeClose(Object o, boolean close) { + if (close && o instanceof Closeable) { + this.toClose = (Closeable) o; + } else { + this.toClose = null; + } + } + + @Override + public void close() throws IOException { + if (toClose != null) { + try { + toClose.close(); + } finally { + toClose = null; + } + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/package-info.java new file mode 100644 index 0000000000000..1c204bb9979a8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/functional/package-info.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Support for functional programming within the Hadoop APIs. + *

 + * Much of this is needed simply to cope with Java's checked exceptions and + * the fact that the java.util.function interfaces can only throw runtime exceptions. + *
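A brief illustration of the problem (a sketch, not part of the patch; class and field names are invented): a lambda that throws an IOException cannot implement java.util.function.Function, but it can implement FunctionRaisingIOE.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.hadoop.util.functional.FunctionRaisingIOE;

public class WhyRaisingIOE {
  // Will not compile: java.util.function.Function.apply() cannot throw
  // a checked IOException.
  // Function<String, Long> size = name -> Files.size(Paths.get(name));

  // Compiles: FunctionRaisingIOE.apply() is declared to throw IOException.
  static final FunctionRaisingIOE<String, Long> SIZE =
      name -> Files.size(Paths.get(name));

  public static void main(String[] args) throws IOException {
    System.out.println(SIZE.apply(args[0]));
  }
}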

    + * Pretty much all the Hadoop FS APIs raise IOExceptions, hence the need + * for these classes. If Java had made a different decision about the + * nature of exceptions, life would be better. + *

    + * Do note that the {@link org.apache.hadoop.util.functional.RemoteIterators} + * iterators go beyond that of the java ones, in terms of declaring themselves + * Closeable and implementors of + * {@link org.apache.hadoop.fs.statistics.IOStatisticsSource}; a chain + * of wrapped iterators can supply statistics of the inner iterators, and + * encourage close() to be called after use. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +package org.apache.hadoop.util.functional; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj index ac3767b276882..36b560305b36a 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj +++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj @@ -17,7 +17,7 @@ limitations under the License. --> - + Release @@ -69,15 +69,6 @@ hadoop - $(CustomSnappyPrefix) - $(CustomSnappyPrefix)\lib - $(CustomSnappyPrefix)\bin - $(CustomSnappyLib) - $(CustomSnappyPrefix) - $(CustomSnappyPrefix)\include - $(CustomSnappyInclude) - true - $(SnappyInclude);$(IncludePath) $(ZLIB_HOME);$(IncludePath) @@ -87,11 +78,6 @@ $(CustomIsalLib) true - - - - - /D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\" - - - /D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\" - - - - - - /D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\" /D HADOOP_ISAL_LIBRARY=L\"isa-l.dll\" @@ -181,7 +156,6 @@ - diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c index 26e1fa623e859..3f141be05b549 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c @@ -42,16 +42,18 @@ #ifdef UNIX static void * (*dlsym_CRYPTO_malloc) (int, const char *, int); static void (*dlsym_CRYPTO_free) (void *); +#if OPENSSL_VERSION_NUMBER < 0x10100000L static int (*dlsym_CRYPTO_num_locks) (void); static void (*dlsym_CRYPTO_set_locking_callback) (void (*)()); static void (*dlsym_CRYPTO_set_id_callback) (unsigned long (*)()); static void (*dlsym_ENGINE_load_rdrand) (void); +static void (*dlsym_ENGINE_cleanup) (void); +#endif static ENGINE * (*dlsym_ENGINE_by_id) (const char *); static int (*dlsym_ENGINE_init) (ENGINE *); static int (*dlsym_ENGINE_set_default) (ENGINE *, unsigned int); static int (*dlsym_ENGINE_finish) (ENGINE *); static int (*dlsym_ENGINE_free) (ENGINE *); -static void (*dlsym_ENGINE_cleanup) (void); static int (*dlsym_RAND_bytes) (unsigned char *, int); static unsigned long (*dlsym_ERR_get_error) (void); #endif @@ -113,6 +115,8 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_ dlerror(); // Clear any existing error LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_malloc, env, openssl, "CRYPTO_malloc"); LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_free, env, openssl, "CRYPTO_free"); +#if OPENSSL_VERSION_NUMBER < 0x10100000L + // pre-1.1.0 LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_num_locks, env, openssl, "CRYPTO_num_locks"); LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_locking_callback, \ env, openssl, "CRYPTO_set_locking_callback"); @@ -120,13 +124,14 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_ openssl, "CRYPTO_set_id_callback"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_load_rdrand, env, \ openssl, "ENGINE_load_rdrand"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup"); +#endif LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_by_id, env, openssl, "ENGINE_by_id"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_init, env, openssl, "ENGINE_init"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_set_default, env, \ openssl, "ENGINE_set_default"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_finish, env, openssl, "ENGINE_finish"); LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_free, env, openssl, "ENGINE_free"); - LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup"); LOAD_DYNAMIC_SYMBOL(dlsym_RAND_bytes, env, openssl, "RAND_bytes"); LOAD_DYNAMIC_SYMBOL(dlsym_ERR_get_error, env, openssl, "ERR_get_error"); #endif @@ -303,9 +308,11 @@ static unsigned long pthreads_thread_id(void) */ static ENGINE * openssl_rand_init(void) { +#if OPENSSL_VERSION_NUMBER < 0x10100000L locks_setup(); dlsym_ENGINE_load_rdrand(); +#endif ENGINE *eng = dlsym_ENGINE_by_id("rdrand"); int ret = -1; @@ -340,11 +347,12 @@ static void openssl_rand_clean(ENGINE *eng, int clean_locks) dlsym_ENGINE_finish(eng); dlsym_ENGINE_free(eng); } - +#if OPENSSL_VERSION_NUMBER < 0x10100000L dlsym_ENGINE_cleanup(); if (clean_locks) { locks_cleanup(); } +#endif } static int openssl_rand_bytes(unsigned char *buf, int num) diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c deleted file mode 100644 index 2c8af1b9115d5..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#include "org_apache_hadoop.h" -#include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h" - -#ifdef UNIX -#include "config.h" -#endif // UNIX -#include "lz4.h" -#include "lz4hc.h" - - -static jfieldID Lz4Compressor_uncompressedDirectBuf; -static jfieldID Lz4Compressor_uncompressedDirectBufLen; -static jfieldID Lz4Compressor_compressedDirectBuf; -static jfieldID Lz4Compressor_directBufferSize; - - -JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_initIDs -(JNIEnv *env, jclass clazz){ - - Lz4Compressor_uncompressedDirectBuf = (*env)->GetFieldID(env, clazz, - "uncompressedDirectBuf", - "Ljava/nio/Buffer;"); - Lz4Compressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, clazz, - "uncompressedDirectBufLen", "I"); - Lz4Compressor_compressedDirectBuf = (*env)->GetFieldID(env, clazz, - "compressedDirectBuf", - "Ljava/nio/Buffer;"); - Lz4Compressor_directBufferSize = (*env)->GetFieldID(env, clazz, - "directBufferSize", "I"); -} - -JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_compressBytesDirect -(JNIEnv *env, jobject thisj){ - const char* uncompressed_bytes; - char *compressed_bytes; - - // Get members of Lz4Compressor - jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf); - jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen); - jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_compressedDirectBuf); - jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_directBufferSize); - - // Get the input direct buffer - uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf); - - if (uncompressed_bytes == 0) { - return (jint)0; - } - - // Get the output direct buffer - compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf); - - if (compressed_bytes == 0) { - return (jint)0; - } - - compressed_direct_buf_len = LZ4_compress(uncompressed_bytes, compressed_bytes, uncompressed_direct_buf_len); - if (compressed_direct_buf_len < 0){ - THROW(env, "java/lang/InternalError", "LZ4_compress failed"); - } - - (*env)->SetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen, 0); - - return (jint)compressed_direct_buf_len; -} - -JNIEXPORT jstring JNICALL -Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_getLibraryName( - JNIEnv *env, jclass class - ) { - char version_buf[128]; - snprintf(version_buf, sizeof(version_buf), "revision:%d", LZ4_versionNumber()); - return (*env)->NewStringUTF(env, version_buf); -} - -JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_compressBytesDirectHC -(JNIEnv *env, jobject thisj){ - const char* uncompressed_bytes = NULL; - char* compressed_bytes = NULL; - - // Get members of Lz4Compressor - jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf); - jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen); - jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_compressedDirectBuf); - jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_directBufferSize); - - // Get the input direct buffer - uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf); - - if (uncompressed_bytes == 0) { - return (jint)0; - } - - // Get the output direct buffer - compressed_bytes = (char 
*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf); - - if (compressed_bytes == 0) { - return (jint)0; - } - - compressed_direct_buf_len = LZ4_compressHC(uncompressed_bytes, compressed_bytes, uncompressed_direct_buf_len); - if (compressed_direct_buf_len < 0){ - THROW(env, "java/lang/InternalError", "LZ4_compressHC failed"); - } - - (*env)->SetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen, 0); - - return (jint)compressed_direct_buf_len; -} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c deleted file mode 100644 index cdeaa315d1e59..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "org_apache_hadoop.h" -#include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h" - -#ifdef UNIX -#include "config.h" -#endif // UNIX -#include "lz4.h" - - -static jfieldID Lz4Decompressor_compressedDirectBuf; -static jfieldID Lz4Decompressor_compressedDirectBufLen; -static jfieldID Lz4Decompressor_uncompressedDirectBuf; -static jfieldID Lz4Decompressor_directBufferSize; - -JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_initIDs -(JNIEnv *env, jclass clazz){ - - Lz4Decompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz, - "compressedDirectBuf", - "Ljava/nio/Buffer;"); - Lz4Decompressor_compressedDirectBufLen = (*env)->GetFieldID(env,clazz, - "compressedDirectBufLen", "I"); - Lz4Decompressor_uncompressedDirectBuf = (*env)->GetFieldID(env,clazz, - "uncompressedDirectBuf", - "Ljava/nio/Buffer;"); - Lz4Decompressor_directBufferSize = (*env)->GetFieldID(env, clazz, - "directBufferSize", "I"); -} - -JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_decompressBytesDirect -(JNIEnv *env, jobject thisj){ - const char *compressed_bytes; - char *uncompressed_bytes; - - // Get members of Lz4Decompressor - jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_compressedDirectBuf); - jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, Lz4Decompressor_compressedDirectBufLen); - jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_uncompressedDirectBuf); - size_t uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Decompressor_directBufferSize); - - // Get the input direct buffer - compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf); - - if (compressed_bytes == 0) { - return (jint)0; - } - - // Get the output direct 
buffer - uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf); - - if (uncompressed_bytes == 0) { - return (jint)0; - } - - uncompressed_direct_buf_len = LZ4_decompress_safe(compressed_bytes, uncompressed_bytes, compressed_direct_buf_len, uncompressed_direct_buf_len); - if (uncompressed_direct_buf_len < 0) { - THROW(env, "java/lang/InternalError", "LZ4_uncompress_unknownOutputSize failed."); - } - - (*env)->SetIntField(env, thisj, Lz4Decompressor_compressedDirectBufLen, 0); - - return (jint)uncompressed_direct_buf_len; -} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c deleted file mode 100644 index 34a61733f258e..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c +++ /dev/null @@ -1,897 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Copyright (C) 2011-2014, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html - - LZ4 source repository : http://code.google.com/p/lz4/ -*/ - - - -/************************************** - Tuning Parameter -**************************************/ -#define LZ4HC_DEFAULT_COMPRESSIONLEVEL 8 - - -/************************************** - Memory routines -**************************************/ -#include /* calloc, free */ -#define ALLOCATOR(s) calloc(1,s) -#define FREEMEM free -#include /* memset, memcpy */ -#define MEM_INIT memset - - -/************************************** - CPU Feature Detection -**************************************/ -/* 32 or 64 bits ? 
*/ -#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \ - || defined(__64BIT__) || defined(__mips64) \ - || defined(__powerpc64__) || defined(__powerpc64le__) \ - || defined(__ppc64__) || defined(__ppc64le__) \ - || defined(__PPC64__) || defined(__PPC64LE__) \ - || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) \ - || defined(__s390x__) ) /* Detects 64 bits mode */ -# define LZ4_ARCH64 1 -#else -# define LZ4_ARCH64 0 -#endif - -/* - * Little Endian or Big Endian ? - * Overwrite the #define below if you know your architecture endianess - */ -#include /* Apparently required to detect endianess */ -#if defined (__GLIBC__) -# include -# if (__BYTE_ORDER == __BIG_ENDIAN) -# define LZ4_BIG_ENDIAN 1 -# endif -#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN)) -# define LZ4_BIG_ENDIAN 1 -#elif defined(__sparc) || defined(__sparc__) \ - || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \ - || defined(__hpux) || defined(__hppa) \ - || defined(_MIPSEB) || defined(__s390__) -# define LZ4_BIG_ENDIAN 1 -#else -/* Little Endian assumed. PDP Endian and other very rare endian format are unsupported. */ -#endif - -/* - * Unaligned memory access is automatically enabled for "common" CPU, such as x86. - * For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected - * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance - */ -#if defined(__ARM_FEATURE_UNALIGNED) -# define LZ4_FORCE_UNALIGNED_ACCESS 1 -#endif - -/* Define this parameter if your target system or compiler does not support hardware bit count */ -#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */ -# define LZ4_FORCE_SW_BITCOUNT -#endif - - -/************************************** - Compiler Options -**************************************/ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */ -/* "restrict" is a known keyword */ -#else -# define restrict /* Disable restrict */ -#endif - -#ifdef _MSC_VER /* Visual Studio */ -# define FORCE_INLINE static __forceinline -# include /* For Visual 2005 */ -# if LZ4_ARCH64 /* 64-bits */ -# pragma intrinsic(_BitScanForward64) /* For Visual 2005 */ -# pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */ -# else /* 32-bits */ -# pragma intrinsic(_BitScanForward) /* For Visual 2005 */ -# pragma intrinsic(_BitScanReverse) /* For Visual 2005 */ -# endif -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 4701) /* disable: C4701: potentially uninitialized local variable used */ -#else -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -#endif - -#ifdef _MSC_VER /* Visual Studio */ -# define lz4_bswap16(x) _byteswap_ushort(x) -#else -# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) -#endif - - -/************************************** - Includes -**************************************/ -#include "lz4hc.h" -#include "lz4.h" - - -/************************************** - Basic Types -**************************************/ -#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */ -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - 
typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; -#else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; -#endif - -#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS) -# define _PACKED __attribute__ ((packed)) -#else -# define _PACKED -#endif - -#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) -# ifdef __IBMC__ -# pragma pack(1) -# else -# pragma pack(push, 1) -# endif -#endif - -typedef struct _U16_S { U16 v; } _PACKED U16_S; -typedef struct _U32_S { U32 v; } _PACKED U32_S; -typedef struct _U64_S { U64 v; } _PACKED U64_S; - -#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__) -# pragma pack(pop) -#endif - -#define A64(x) (((U64_S *)(x))->v) -#define A32(x) (((U32_S *)(x))->v) -#define A16(x) (((U16_S *)(x))->v) - - -/************************************** - Constants -**************************************/ -#define MINMATCH 4 - -#define DICTIONARY_LOGSIZE 16 -#define MAXD (1<> ((MINMATCH*8)-HASH_LOG)) -#define HASH_VALUE(p) HASH_FUNCTION(A32(p)) -#define HASH_POINTER(p) (HashTable[HASH_VALUE(p)] + base) -#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK] -#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p)) - - -/************************************** - Private functions -**************************************/ -#if LZ4_ARCH64 - -FORCE_INLINE int LZ4_NbCommonBytes (register U64 val) -{ -#if defined(LZ4_BIG_ENDIAN) -# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (int)(r>>3); -# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clzll(val) >> 3); -# else - int r; - if (!(val>>32)) { r=4; } else { r=0; val>>=32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif -#else -# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward64( &r, val ); - return (int)(r>>3); -# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctzll(val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58]; -# endif -#endif -} - -#else - -FORCE_INLINE int LZ4_NbCommonBytes (register U32 val) -{ -#if defined(LZ4_BIG_ENDIAN) -# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r; - _BitScanReverse( &r, val ); - return (int)(r>>3); -# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_clz(val) >> 3); -# else - int r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif -#else -# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r; - _BitScanForward( &r, val ); - return (int)(r>>3); -# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (__builtin_ctz(val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# 
endif -#endif -} - -#endif - - -int LZ4_sizeofStreamStateHC() -{ - return sizeof(LZ4HC_Data_Structure); -} - -FORCE_INLINE void LZ4_initHC (LZ4HC_Data_Structure* hc4, const BYTE* base) -{ - MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); - MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); - hc4->nextToUpdate = base + 1; - hc4->base = base; - hc4->inputBuffer = base; - hc4->end = base; -} - -int LZ4_resetStreamStateHC(void* state, const char* inputBuffer) -{ - if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */ - LZ4_initHC((LZ4HC_Data_Structure*)state, (const BYTE*)inputBuffer); - return 0; -} - - -void* LZ4_createHC (const char* inputBuffer) -{ - void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure)); - LZ4_initHC ((LZ4HC_Data_Structure*)hc4, (const BYTE*)inputBuffer); - return hc4; -} - - -int LZ4_freeHC (void* LZ4HC_Data) -{ - FREEMEM(LZ4HC_Data); - return (0); -} - - -/* Update chains up to ip (excluded) */ -FORCE_INLINE void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip) -{ - U16* chainTable = hc4->chainTable; - HTYPE* HashTable = hc4->hashTable; - INITBASE(base,hc4->base); - - while(hc4->nextToUpdate < ip) - { - const BYTE* const p = hc4->nextToUpdate; - size_t delta = (p) - HASH_POINTER(p); - if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; - DELTANEXT(p) = (U16)delta; - HashTable[HASH_VALUE(p)] = (HTYPE)((p) - base); - hc4->nextToUpdate++; - } -} - - -char* LZ4_slideInputBufferHC(void* LZ4HC_Data) -{ - LZ4HC_Data_Structure* hc4 = (LZ4HC_Data_Structure*)LZ4HC_Data; - size_t distance = (hc4->end - 64 KB) - hc4->inputBuffer; - - if (hc4->end <= hc4->inputBuffer + 64 KB) return (char*)(hc4->end); /* no update : less than 64KB within buffer */ - - distance = (distance >> 16) << 16; /* Must be a multiple of 64 KB */ - LZ4HC_Insert(hc4, hc4->end - MINMATCH); - memcpy((void*)(hc4->end - 64 KB - distance), (const void*)(hc4->end - 64 KB), 64 KB); - hc4->nextToUpdate -= distance; - hc4->base -= distance; - if ((U32)(hc4->inputBuffer - hc4->base) > 1 GB + 64 KB) /* Avoid overflow */ - { - int i; - hc4->base += 1 GB; - for (i=0; ihashTable[i] -= 1 GB; - } - hc4->end -= distance; - return (char*)(hc4->end); -} - - -FORCE_INLINE size_t LZ4HC_CommonLength (const BYTE* p1, const BYTE* p2, const BYTE* const matchlimit) -{ - const BYTE* p1t = p1; - - while (p1tchainTable; - HTYPE* const HashTable = hc4->hashTable; - const BYTE* ref; - INITBASE(base,hc4->base); - int nbAttempts=maxNbAttempts; - size_t repl=0, ml=0; - U16 delta=0; /* useless assignment, to remove an uninitialization warning */ - - /* HC4 match finder */ - LZ4HC_Insert(hc4, ip); - ref = HASH_POINTER(ip); - -#define REPEAT_OPTIMIZATION -#ifdef REPEAT_OPTIMIZATION - /* Detect repetitive sequences of length <= 4 */ - if ((U32)(ip-ref) <= 4) /* potential repetition */ - { - if (A32(ref) == A32(ip)) /* confirmed */ - { - delta = (U16)(ip-ref); - repl = ml = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH; - *matchpos = ref; - } - ref = GETNEXT(ref); - } -#endif - - while (((U32)(ip-ref) <= MAX_DISTANCE) && (nbAttempts)) - { - nbAttempts--; - if (*(ref+ml) == *(ip+ml)) - if (A32(ref) == A32(ip)) - { - size_t mlt = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH; - if (mlt > ml) { ml = mlt; *matchpos = ref; } - } - ref = GETNEXT(ref); - } - -#ifdef REPEAT_OPTIMIZATION - /* Complete table */ - if (repl) - { - const BYTE* ptr = ip; - const BYTE* end; - - end = ip + repl - (MINMATCH-1); - while(ptr < end-delta) 
- { - DELTANEXT(ptr) = delta; /* Pre-Load */ - ptr++; - } - do - { - DELTANEXT(ptr) = delta; - HashTable[HASH_VALUE(ptr)] = (HTYPE)((ptr) - base); /* Head of chain */ - ptr++; - } while(ptr < end); - hc4->nextToUpdate = end; - } -#endif - - return (int)ml; -} - - -FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos, const int maxNbAttempts) -{ - U16* const chainTable = hc4->chainTable; - HTYPE* const HashTable = hc4->hashTable; - INITBASE(base,hc4->base); - const BYTE* ref; - int nbAttempts = maxNbAttempts; - int delta = (int)(ip-startLimit); - - /* First Match */ - LZ4HC_Insert(hc4, ip); - ref = HASH_POINTER(ip); - - while (((U32)(ip-ref) <= MAX_DISTANCE) && (nbAttempts)) - { - nbAttempts--; - if (*(startLimit + longest) == *(ref - delta + longest)) - if (A32(ref) == A32(ip)) - { -#if 1 - const BYTE* reft = ref+MINMATCH; - const BYTE* ipt = ip+MINMATCH; - const BYTE* startt = ip; - - while (iptstartLimit) && (reft > hc4->inputBuffer) && (startt[-1] == reft[-1])) {startt--; reft--;} - - if ((ipt-startt) > longest) - { - longest = (int)(ipt-startt); - *matchpos = reft; - *startpos = startt; - } - } - ref = GETNEXT(ref); - } - - return longest; -} - - -typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive; - -FORCE_INLINE int LZ4HC_encodeSequence ( - const BYTE** ip, - BYTE** op, - const BYTE** anchor, - int matchLength, - const BYTE* ref, - limitedOutput_directive limitedOutputBuffer, - BYTE* oend) -{ - int length; - BYTE* token; - - /* Encode Literal length */ - length = (int)(*ip - *anchor); - token = (*op)++; - if ((limitedOutputBuffer) && ((*op + length + (2 + 1 + LASTLITERALS) + (length>>8)) > oend)) return 1; /* Check output limit */ - if (length>=(int)RUN_MASK) { int len; *token=(RUN_MASK< 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; } - else *token = (BYTE)(length<>8) > oend)) return 1; /* Check output limit */ - if (length>=(int)ML_MASK) { *token+=ML_MASK; length-=ML_MASK; for(; length > 509 ; length-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (length > 254) { length-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)length; } - else *token += (BYTE)(length); - - /* Prepare next loop */ - *ip += matchLength; - *anchor = *ip; - - return 0; -} - - -#define MAX_COMPRESSION_LEVEL 16 -static int LZ4HC_compress_generic ( - void* ctxvoid, - const char* source, - char* dest, - int inputSize, - int maxOutputSize, - int compressionLevel, - limitedOutput_directive limit - ) -{ - LZ4HC_Data_Structure* ctx = (LZ4HC_Data_Structure*) ctxvoid; - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + inputSize; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = (iend - LASTLITERALS); - - BYTE* op = (BYTE*) dest; - BYTE* const oend = op + maxOutputSize; - - const int maxNbAttempts = compressionLevel > MAX_COMPRESSION_LEVEL ? 1 << MAX_COMPRESSION_LEVEL : compressionLevel ? 
1<<(compressionLevel-1) : 1<end) return 0; - ctx->end += inputSize; - - ip++; - - /* Main Loop */ - while (ip < mflimit) - { - ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts); - if (!ml) { ip++; continue; } - - /* saved, in case we would skip too much */ - start0 = ip; - ref0 = ref; - ml0 = ml; - -_Search2: - if (ip+ml < mflimit) - ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2, maxNbAttempts); - else ml2 = ml; - - if (ml2 == ml) /* No better match */ - { - if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; - continue; - } - - if (start0 < ip) - { - if (start2 < ip + ml0) /* empirical */ - { - ip = start0; - ref = ref0; - ml = ml0; - } - } - - /* Here, start0==ip */ - if ((start2 - ip) < 3) /* First Match too small : removed */ - { - ml = ml2; - ip = start2; - ref =ref2; - goto _Search2; - } - -_Search3: - /* - * Currently we have : - * ml2 > ml1, and - * ip1+3 <= ip2 (usually < ip1+ml1) - */ - if ((start2 - ip) < OPTIMAL_ML) - { - int correction; - int new_ml = ml; - if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; - if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH; - correction = new_ml - (int)(start2 - ip); - if (correction > 0) - { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } - /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */ - - if (start2 + ml2 < mflimit) - ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts); - else ml3 = ml2; - - if (ml3 == ml2) /* No better match : 2 sequences to encode */ - { - /* ip & ref are known; Now for ml */ - if (start2 < ip+ml) ml = (int)(start2 - ip); - /* Now, encode 2 sequences */ - if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; - ip = start2; - if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) return 0; - continue; - } - - if (start3 < ip+ml+3) /* Not enough space for match 2 : remove it */ - { - if (start3 >= (ip+ml)) /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */ - { - if (start2 < ip+ml) - { - int correction = (int)(ip+ml - start2); - start2 += correction; - ref2 += correction; - ml2 -= correction; - if (ml2 < MINMATCH) - { - start2 = start3; - ref2 = ref3; - ml2 = ml3; - } - } - - if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; - ip = start3; - ref = ref3; - ml = ml3; - - start0 = start2; - ref0 = ref2; - ml0 = ml2; - goto _Search2; - } - - start2 = start3; - ref2 = ref3; - ml2 = ml3; - goto _Search3; - } - - /* - * OK, now we have 3 ascending matches; let's write at least the first one - * ip & ref are known; Now for ml - */ - if (start2 < ip+ml) - { - if ((start2 - ip) < (int)ML_MASK) - { - int correction; - if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; - if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; - correction = ml - (int)(start2 - ip); - if (correction > 0) - { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } - else - { - ml = (int)(start2 - ip); - } - } - if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; - - ip = start2; - ref = ref2; - ml = ml2; - - start2 = start3; - ref2 = ref3; - ml2 = ml3; - - goto _Search3; - - } - - /* Encode Last Literals */ - { - int lastRun = (int)(iend - anchor); - if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* 
Check output limit */ - if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK< 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } - else *op++ = (BYTE)(lastRun< The memory position where the next input data block must start is provided as the result of the function. - -Compression can then resume, using LZ4_compressHC_continue() or LZ4_compressHC_limitedOutput_continue(), as usual. - -When compression is completed, a call to LZ4_freeHC() will release the memory used by the LZ4HC Data Structure. -*/ - -int LZ4_sizeofStreamStateHC(void); -int LZ4_resetStreamStateHC(void* state, const char* inputBuffer); - -/* -These functions achieve the same result as : -void* LZ4_createHC (const char* inputBuffer); - -They are provided here to allow the user program to allocate memory using its own routines. - -To know how much space must be allocated, use LZ4_sizeofStreamStateHC(); -Note also that space must be aligned for pointers (32 or 64 bits). - -Once space is allocated, you must initialize it using : LZ4_resetStreamStateHC(void* state, const char* inputBuffer); -void* state is a pointer to the space allocated. -It must be aligned for pointers (32 or 64 bits), and be large enough. -The parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer. -The input buffer must be already allocated, and size at least 192KB. -'inputBuffer' will also be the 'const char* source' of the first block. - -The same space can be re-used multiple times, just by initializing it each time with LZ4_resetStreamState(). -return value of LZ4_resetStreamStateHC() must be 0 is OK. -Any other value means there was an error (typically, state is not aligned for pointers (32 or 64 bits)). -*/ - - -#if defined (__cplusplus) -} -#endif diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c deleted file mode 100644 index 9a09f078d8260..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -#include "org_apache_hadoop_io_compress_snappy.h" - -#if defined HADOOP_SNAPPY_LIBRARY - -#include -#include -#include - -#ifdef UNIX -#include -#include "config.h" -#endif // UNIX - -#ifdef WINDOWS -#include "winutils.h" -#endif - -#include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h" - -#define JINT_MAX 0x7fffffff - -static jfieldID SnappyCompressor_uncompressedDirectBuf; -static jfieldID SnappyCompressor_uncompressedDirectBufLen; -static jfieldID SnappyCompressor_compressedDirectBuf; -static jfieldID SnappyCompressor_directBufferSize; - -#ifdef UNIX -static snappy_status (*dlsym_snappy_compress)(const char*, size_t, char*, size_t*); -#endif - -#ifdef WINDOWS -typedef snappy_status (__cdecl *__dlsym_snappy_compress)(const char*, size_t, char*, size_t*); -static __dlsym_snappy_compress dlsym_snappy_compress; -#endif - -JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_initIDs -(JNIEnv *env, jclass clazz){ -#ifdef UNIX - // Load libsnappy.so - void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); - if (!libsnappy) { - char msg[1000]; - snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror()); - THROW(env, "java/lang/UnsatisfiedLinkError", msg); - return; - } -#endif - -#ifdef WINDOWS - HMODULE libsnappy = LoadLibrary(HADOOP_SNAPPY_LIBRARY); - if (!libsnappy) { - THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load snappy.dll"); - return; - } -#endif - - // Locate the requisite symbols from libsnappy.so -#ifdef UNIX - dlerror(); // Clear any existing error - LOAD_DYNAMIC_SYMBOL(dlsym_snappy_compress, env, libsnappy, "snappy_compress"); -#endif - -#ifdef WINDOWS - LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_compress, dlsym_snappy_compress, env, libsnappy, "snappy_compress"); -#endif - - SnappyCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, clazz, - "uncompressedDirectBuf", - "Ljava/nio/Buffer;"); - SnappyCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, clazz, - "uncompressedDirectBufLen", "I"); - SnappyCompressor_compressedDirectBuf = (*env)->GetFieldID(env, clazz, - "compressedDirectBuf", - "Ljava/nio/Buffer;"); - SnappyCompressor_directBufferSize = (*env)->GetFieldID(env, clazz, - "directBufferSize", "I"); -} - -JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_compressBytesDirect -(JNIEnv *env, jobject thisj){ - const char* uncompressed_bytes; - char* compressed_bytes; - snappy_status ret; - // Get members of SnappyCompressor - jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_uncompressedDirectBuf); - jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen); - jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf); - jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize); - size_t buf_len; - - // Get the input direct buffer - uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf); - - if (uncompressed_bytes == 0) { - return (jint)0; - } - - // Get the output direct buffer - compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf); - - if (compressed_bytes == 0) { - return (jint)0; - } - - /* size_t should always be 4 bytes or larger. 
*/ - buf_len = (size_t)compressed_direct_buf_len; - ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len, - compressed_bytes, &buf_len); - if (ret != SNAPPY_OK){ - THROW(env, "java/lang/InternalError", "Could not compress data. Buffer length is too small."); - return 0; - } - if (buf_len > JINT_MAX) { - THROW(env, "java/lang/InternalError", "Invalid return buffer length."); - return 0; - } - - (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0); - return (jint)buf_len; -} - -JNIEXPORT jstring JNICALL -Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_getLibraryName(JNIEnv *env, jclass class) { -#ifdef UNIX - if (dlsym_snappy_compress) { - Dl_info dl_info; - if(dladdr( - dlsym_snappy_compress, - &dl_info)) { - return (*env)->NewStringUTF(env, dl_info.dli_fname); - } - } - - return (*env)->NewStringUTF(env, HADOOP_SNAPPY_LIBRARY); -#endif - -#ifdef WINDOWS - LPWSTR filename = NULL; - GetLibraryName(dlsym_snappy_compress, &filename); - if (filename != NULL) { - return (*env)->NewString(env, filename, (jsize) wcslen(filename)); - } else { - return (*env)->NewStringUTF(env, "Unavailable"); - } -#endif -} -#endif //define HADOOP_SNAPPY_LIBRARY diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c deleted file mode 100644 index 69ec1017526fd..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "org_apache_hadoop_io_compress_snappy.h" - -#if defined HADOOP_SNAPPY_LIBRARY - -#include -#include -#include - -#ifdef UNIX -#include -#include "config.h" -#endif - -#include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h" - -static jfieldID SnappyDecompressor_compressedDirectBuf; -static jfieldID SnappyDecompressor_compressedDirectBufLen; -static jfieldID SnappyDecompressor_uncompressedDirectBuf; -static jfieldID SnappyDecompressor_directBufferSize; - -#ifdef UNIX -static snappy_status (*dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*); -#endif - -#ifdef WINDOWS -typedef snappy_status (__cdecl *__dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*); -static __dlsym_snappy_uncompress dlsym_snappy_uncompress; -#endif - -JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_initIDs -(JNIEnv *env, jclass clazz){ - - // Load libsnappy.so -#ifdef UNIX - void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); - if (!libsnappy) { - char* msg = (char*)malloc(1000); - snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror()); - THROW(env, "java/lang/UnsatisfiedLinkError", msg); - return; - } -#endif - -#ifdef WINDOWS - HMODULE libsnappy = LoadLibrary(HADOOP_SNAPPY_LIBRARY); - if (!libsnappy) { - THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load snappy.dll"); - return; - } -#endif - - // Locate the requisite symbols from libsnappy.so -#ifdef UNIX - dlerror(); // Clear any existing error - LOAD_DYNAMIC_SYMBOL(dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress"); - -#endif - -#ifdef WINDOWS - LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_uncompress, dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress"); -#endif - - SnappyDecompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz, - "compressedDirectBuf", - "Ljava/nio/Buffer;"); - SnappyDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env,clazz, - "compressedDirectBufLen", "I"); - SnappyDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env,clazz, - "uncompressedDirectBuf", - "Ljava/nio/Buffer;"); - SnappyDecompressor_directBufferSize = (*env)->GetFieldID(env, clazz, - "directBufferSize", "I"); -} - -JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_decompressBytesDirect -(JNIEnv *env, jobject thisj){ - const char* compressed_bytes = NULL; - char* uncompressed_bytes = NULL; - snappy_status ret; - // Get members of SnappyDecompressor - jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_compressedDirectBuf); - jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, SnappyDecompressor_compressedDirectBufLen); - jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_uncompressedDirectBuf); - size_t uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyDecompressor_directBufferSize); - - // Get the input direct buffer - compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf); - - if (compressed_bytes == 0) { - return (jint)0; - } - - // Get the output direct buffer - uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf); - - if (uncompressed_bytes == 0) { - return (jint)0; - } - - ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len, - uncompressed_bytes, &uncompressed_direct_buf_len); - if (ret == SNAPPY_BUFFER_TOO_SMALL){ - THROW(env, "java/lang/InternalError", "Could not decompress data. 
Buffer length is too small."); - } else if (ret == SNAPPY_INVALID_INPUT){ - THROW(env, "java/lang/InternalError", "Could not decompress data. Input is invalid."); - } else if (ret != SNAPPY_OK){ - THROW(env, "java/lang/InternalError", "Could not decompress data."); - } - - (*env)->SetIntField(env, thisj, SnappyDecompressor_compressedDirectBufLen, 0); - - return (jint)uncompressed_direct_buf_len; -} - -#endif //define HADOOP_SNAPPY_LIBRARY diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c index 41eb9e2c85a10..6581f292b4a00 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c @@ -219,13 +219,13 @@ JNIEXPORT jint Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_defla return (jint) 0; } - bytes_read += input.pos; + bytes_read += input.pos - uncompressed_direct_buf_off; bytes_written += output.pos; (*env)->SetLongField(env, this, ZStandardCompressor_bytesRead, bytes_read); (*env)->SetLongField(env, this, ZStandardCompressor_bytesWritten, bytes_written); (*env)->SetIntField(env, this, ZStandardCompressor_uncompressedDirectBufOff, input.pos); - (*env)->SetIntField(env, this, ZStandardCompressor_uncompressedDirectBufLen, input.size - input.pos); + (*env)->SetIntField(env, this, ZStandardCompressor_uncompressedDirectBufLen, input.size); return (jint) output.pos; } diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c index 9cca6dd754b09..816536b637d39 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_common.c @@ -92,6 +92,7 @@ void getInputs(JNIEnv *env, jobjectArray inputs, jintArray inputOffsets, destInputs[i] = NULL; } } + (*env)->ReleaseIntArrayElements(env, inputOffsets, tmpInputOffsets, 0); } void getOutputs(JNIEnv *env, jobjectArray outputs, jintArray outputOffsets, @@ -112,4 +113,5 @@ void getOutputs(JNIEnv *env, jobjectArray outputs, jintArray outputOffsets, byteBuffer)); destOutputs[i] += tmpOutputOffsets[i]; } -} \ No newline at end of file + (*env)->ReleaseIntArrayElements(env, outputOffsets, tmpOutputOffsets, 0); +} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_rs_decoder.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_rs_decoder.c index 52d255afd58d8..72314d2ad545a 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_rs_decoder.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_rs_decoder.c @@ -66,6 +66,7 @@ jintArray outputOffsets) { decode(&rsDecoder->decoder, rsDecoder->inputs, tmpErasedIndexes, numErased, rsDecoder->outputs, chunkSize); + (*env)->ReleaseIntArrayElements(env, erasedIndexes, tmpErasedIndexes, 0); } JNIEXPORT void JNICALL diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto 
b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto new file mode 100644 index 0000000000000..16ee880e7b720 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine2.proto @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and stable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for a *stable* .proto interface. + */ +syntax = "proto2"; +/** + * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer + * to marshal the request and response in the RPC layer. + * The messages are sent in addition to the normal RPC header as + * defined in RpcHeader.proto + */ +option java_package = "org.apache.hadoop.ipc.protobuf"; +option java_outer_classname = "ProtobufRpcEngine2Protos"; +option java_generate_equals_and_hash = true; +package hadoop.common; + +/** + * This message is the header for the Protobuf Rpc Engine + * when sending a RPC request from RPC client to the RPC server. + * The actual request (serialized as protobuf) follows this request. + * + * No special header is needed for the Rpc Response for Protobuf Rpc Engine. + * The normal RPC response header (see RpcHeader.proto) are sufficient. + */ +message RequestHeaderProto { + /** Name of the RPC method */ + required string methodName = 1; + + /** + * RPCs for a particular interface (ie protocol) are done using a + * IPC connection that is setup using rpcProxy. + * The rpcProxy's has a declared protocol name that is + * sent form client to server at connection time. + * + * Each Rpc call also sends a protocol name + * (called declaringClassprotocolName). This name is usually the same + * as the connection protocol name except in some cases. + * For example metaProtocols such ProtocolInfoProto which get metainfo + * about the protocol reuse the connection but need to indicate that + * the actual protocol is different (i.e. 
the protocol is + * ProtocolInfoProto) since they reuse the connection; in this case + * the declaringClassProtocolName field is set to the ProtocolInfoProto + */ + required string declaringClassProtocolName = 2; + + /** protocol version of class declaring the called method */ + required uint64 clientProtocolVersion = 3; +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index f0054fedb8e1c..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.fs.FileSystemMultipartUploader$Factory diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index aba3b52dfc9b5..552e28ee488db 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -48,6 +48,14 @@ ordering of the filters. + + hadoop.http.idle_timeout.ms + 60000 + + NN/JN/DN Server connection timeout in milliseconds. + + + @@ -944,6 +952,124 @@ + + fs.viewfs.overload.scheme.target.hdfs.impl + org.apache.hadoop.hdfs.DistributedFileSystem + The DistributedFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are hdfs. + + + + + fs.viewfs.overload.scheme.target.s3a.impl + org.apache.hadoop.fs.s3a.S3AFileSystem + The S3AFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are s3a. + + + + fs.viewfs.overload.scheme.target.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + The RootedOzoneFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are ofs. + + + + + fs.viewfs.overload.scheme.target.o3fs.impl + org.apache.hadoop.fs.ozone.OzoneFileSystem + The OzoneFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are o3fs. + + + + fs.viewfs.overload.scheme.target.ftp.impl + org.apache.hadoop.fs.ftp.FTPFileSystem + The FTPFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are ftp. + + + + + fs.viewfs.overload.scheme.target.webhdfs.impl + org.apache.hadoop.hdfs.web.WebHdfsFileSystem + The WebHdfsFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are webhdfs. 
+ + + + + fs.viewfs.overload.scheme.target.swebhdfs.impl + org.apache.hadoop.hdfs.web.SWebHdfsFileSystem + The SWebHdfsFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are swebhdfs. + + + + + fs.viewfs.overload.scheme.target.file.impl + org.apache.hadoop.fs.LocalFileSystem + The LocalFileSystem for view file system overload scheme when + child file system and ViewFSOverloadScheme's schemes are file. + + + + + fs.viewfs.overload.scheme.target.abfs.impl + org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem + The AzureBlobFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are abfs. + + + + + fs.viewfs.overload.scheme.target.abfss.impl + org.apache.hadoop.fs.azurebfs.SecureAzureBlobFileSystem + The SecureAzureBlobFileSystem for view file system overload + scheme when child file system and ViewFSOverloadScheme's schemes are abfss. + + + + + fs.viewfs.overload.scheme.target.wasb.impl + org.apache.hadoop.fs.azure.NativeAzureFileSystem + The NativeAzureFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are wasb. + + + + + fs.viewfs.overload.scheme.target.swift.impl + org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem + The SwiftNativeFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are swift. + + + + + fs.viewfs.overload.scheme.target.oss.impl + org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem + The AliyunOSSFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are oss. + + + + + fs.viewfs.overload.scheme.target.http.impl + org.apache.hadoop.fs.http.HttpFileSystem + The HttpFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are http. + + + + + fs.viewfs.overload.scheme.target.https.impl + org.apache.hadoop.fs.http.HttpsFileSystem + The HttpsFileSystem for view file system overload scheme + when child file system and ViewFSOverloadScheme's schemes are https. + + + fs.AbstractFileSystem.ftp.impl org.apache.hadoop.fs.ftp.FtpFs @@ -1747,11 +1873,9 @@ fs.s3a.committer.magic.enabled - false + true - Enable support in the filesystem for the S3 "Magic" committer. - When working with AWS S3, S3Guard must be enabled for the destination - bucket, as consistent metadata listings are required. + Enable support in the S3A filesystem for the "Magic" committer. @@ -1799,20 +1923,13 @@ - fs.s3a.committer.staging.abort.pending.uploads + fs.s3a.committer.abort.pending.uploads true - Should the staging committers abort all pending uploads to the destination + Should the committers abort all pending uploads to the destination directory? - Changing this if more than one partitioned committer is - writing to the same destination tree simultaneously; otherwise - the first job to complete will cancel all outstanding uploads from the - others. However, it may lead to leaked outstanding uploads from failed - tasks. If disabled, configure the bucket lifecycle to remove uploads - after a time period, and/or set up a workflow to explicitly delete - entries. Otherwise there is a risk that uncommitted uploads may run up - bills. + Set to false if more than one job is writing to the same directory tree. @@ -2719,7 +2836,7 @@ ${user.home}/hadoop-http-auth-signature-secret The signature secret for signing the authentication tokens. 
- The same secret should be used for RM/NM/NN/DN configurations. + A different secret should be used for each service. diff --git a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties index cbbb88764d1f8..dc37949851cf5 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties +++ b/hadoop-common-project/hadoop-common/src/main/resources/org.apache.hadoop.application-classloader.properties @@ -19,7 +19,7 @@ # contains key properties for setting up the application classloader system.classes.default=java.,\ javax.accessibility.,\ - javax.activation.,\ + -javax.activation.,\ javax.activity.,\ javax.annotation.,\ javax.annotation.processing.,\ diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md index 7f61d3bd45592..6243dfc2a8de2 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md @@ -156,7 +156,7 @@ This section deals with important parameters to be specified in the given config | `yarn.nodemanager.remote-app-log-dir` | */logs* | HDFS directory where the application logs are moved on application completion. Need to set appropriate permissions. Only applicable if log-aggregation is enabled. | | `yarn.nodemanager.remote-app-log-dir-suffix` | *logs* | Suffix appended to the remote log dir. Logs will be aggregated to ${yarn.nodemanager.remote-app-log-dir}/${user}/${thisParam} Only applicable if log-aggregation is enabled. | | `yarn.nodemanager.aux-services` | mapreduce\_shuffle | Shuffle service that needs to be set for Map Reduce applications. | -| `yarn.nodemanager.env-whitelist` | Environment properties to be inherited by containers from NodeManagers | For mapreduce application in addition to the default values HADOOP\_MAPRED_HOME should to be added. Property value should JAVA\_HOME,HADOOP\_COMMON\_HOME,HADOOP\_HDFS\_HOME,HADOOP\_CONF\_DIR,CLASSPATH\_PREPEND\_DISTCACHE,HADOOP\_YARN\_HOME,HADOOP\_MAPRED\_HOME | +| `yarn.nodemanager.env-whitelist` | Environment properties to be inherited by containers from NodeManagers | For mapreduce application in addition to the default values HADOOP\_MAPRED_HOME should to be added. Property value should JAVA\_HOME,HADOOP\_COMMON\_HOME,HADOOP\_HDFS\_HOME,HADOOP\_CONF\_DIR,CLASSPATH\_PREPEND\_DISTCACHE,HADOOP\_YARN\_HOME,HADOOP\_HOME,PATH,LANG,TZ,HADOOP\_MAPRED\_HOME | * Configurations for History Server (Needs to be moved elsewhere): diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md index 0bda253fc8b54..4842d5b86d621 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md @@ -60,7 +60,7 @@ Many subcommands honor a common set of configuration options to alter their beha | `-files ` | Specify comma separated files to be copied to the map reduce cluster. Applies only to job. | | `-fs or ` | Specify default filesystem URL to use. Overrides 'fs.defaultFS' property from configurations. | | `-jt or ` | Specify a ResourceManager. Applies only to job. 
| -| `-libjars ` | Specify comma separated jar files to include in the classpath. Applies only to job. | +| `-libjars ` | Specify comma separated jar files to include in the classpath. Applies only to job. | Hadoop Common Commands ====================== diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md index 7df2cce574b68..4f58e507b7d83 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md @@ -122,25 +122,12 @@ Options copyFromLocal ------------- -Usage: `hadoop fs -copyFromLocal URI` - -Similar to the `fs -put` command, except that the source is restricted to a local file reference. - -Options: - -* `-p` : Preserves access and modification times, ownership and the permissions. -(assuming the permissions can be propagated across filesystems) -* `-f` : Overwrites the destination if it already exists. -* `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication - factor of 1. This flag will result in reduced durability. Use with care. -* `-d` : Skip creation of temporary file with the suffix `._COPYING_`. +Identical to the -put command. copyToLocal ----------- -Usage: `hadoop fs -copyToLocal [-ignorecrc] [-crc] URI ` - -Similar to get command, except that the destination is restricted to a local file reference. +Identical to the -get command. count ----- @@ -535,7 +522,7 @@ Returns 0 on success and -1 on error. put --- -Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [ - | .. ]. ` +Usage: `hadoop fs -put [-f] [-p] [-l] [-d] [-t ] [ - | .. ]. ` Copy single src, or multiple srcs from local file system to the destination file system. Also reads input from stdin and writes to destination file system if the source is set to "-" @@ -547,6 +534,8 @@ Options: * `-p` : Preserves access and modification times, ownership and the permissions. (assuming the permissions can be propagated across filesystems) * `-f` : Overwrites the destination if it already exists. +* `-t ` : Number of threads to be used, default is 1. Useful + when uploading a directory containing more than 1 file. * `-l` : Allow DataNode to lazily persist the file to disk, Forces a replication factor of 1. This flag will result in reduced durability. Use with care. * `-d` : Skip creation of temporary file with the suffix `._COPYING_`. @@ -768,7 +757,7 @@ timestamp of that URI. * Use -a option to change only the access time * Use -m option to change only the modification time -* Use -t option to specify timestamp (in format yyyyMMddHHmmss) instead of current time +* Use -t option to specify timestamp (in format yyyyMMdd:HHmmss) instead of current time * Use -c option to not create file if it does not exist The timestamp format is as follows @@ -778,13 +767,13 @@ The timestamp format is as follows * HH Two digit hour of the day using 24 hour notation (e.g. 23 stands for 11 pm, 11 stands for 11 am) * mm Two digit minutes of the hour * ss Two digit seconds of the minute -e.g. 20180809230000 represents August 9th 2018, 11pm +e.g. 20180809:230000 represents August 9th 2018, 11pm Example: * `hadoop fs -touch pathname` -* `hadoop fs -touch -m -t 20180809230000 pathname` -* `hadoop fs -touch -t 20180809230000 pathname` +* `hadoop fs -touch -m -t 20180809:230000 pathname` +* `hadoop fs -touch -t 20180809:230000 pathname` * `hadoop fs -touch -a pathname` Exit Code: Returns 0 on success and -1 on error. 
@@ -821,6 +810,18 @@ Example: * `hadoop fs -truncate 55 /user/hadoop/file1 /user/hadoop/file2` * `hadoop fs -truncate -w 127 hdfs://nn1.example.com/user/hadoop/file1` +concat +-------- + +Usage: `hadoop fs -concat ` + +Concatenate existing source files into the target file. Target file and source +files should be in the same directory. + +Example: + +* `hadoop fs -concat hdfs://cluster/user/hadoop/target-file hdfs://cluster/user/hadoop/file-0 hdfs://cluster/user/hadoop/file-1` + usage ----- @@ -1100,6 +1101,7 @@ actually fail. | `setfattr` | generally unsupported permissions model | | `setrep`| has no effect | | `truncate` | generally unsupported | +| `concat` | generally unsupported | Different object store clients *may* support these commands: do consult the documentation and test against the target store. diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md index ca5ce4898aa71..0c131ef3ea32b 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md @@ -43,7 +43,7 @@ The following properties should be in the `core-site.xml` of all the nodes in th | `hadoop.http.authentication.type` | `simple` | Defines authentication used for the HTTP web-consoles. The supported values are: `simple` \| `kerberos` \| `#AUTHENTICATION_HANDLER_CLASSNAME#`. | | `hadoop.http.authentication.token.validity` | `36000` | Indicates how long (in seconds) an authentication token is valid before it has to be renewed. | | `hadoop.http.authentication.token.max-inactive-interval` | `-1` (disabled) | Specifies the time, in seconds, between client requests the server will invalidate the token. | -| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, ResourceManager, NameNode, DataNode and NodeManager. This file should be readable only by the Unix user running the daemons. | +| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. A different secret should be used for each service in the cluster, ResourceManager, NameNode, DataNode and NodeManager. This file should be readable only by the Unix user running the daemons. | | `hadoop.http.authentication.cookie.domain` | | The domain to use for the HTTP cookie that stores the authentication token. For authentication to work correctly across all nodes in the cluster the domain must be correctly set. There is no default value, the HTTP cookie will not have a domain working only with the hostname issuing the HTTP cookie. | | `hadoop.http.authentication.cookie.persistent` | `false` (session cookie) | Specifies the persistence of the HTTP cookie. If the value is true, the cookie is a persistent one. Otherwise, it is a session cookie. *IMPORTANT*: when using IP addresses, browsers ignore cookies with domain settings. For this setting to work properly all nodes in the cluster must be configured to generate URLs with `hostname.domain` names on it. | | `hadoop.http.authentication.simple.anonymous.allowed` | `true` | Indicates whether anonymous requests are allowed when using 'simple' authentication. 
| diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/NativeLibraries.md.vm b/hadoop-common-project/hadoop-common/src/site/markdown/NativeLibraries.md.vm index e4f720cee8ce1..1e62e94394f91 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/NativeLibraries.md.vm +++ b/hadoop-common-project/hadoop-common/src/site/markdown/NativeLibraries.md.vm @@ -47,7 +47,7 @@ Components The native hadoop library includes various components: -* Compression Codecs (bzip2, lz4, snappy, zlib) +* Compression Codecs (bzip2, lz4, zlib) * Native IO utilities for [HDFS Short-Circuit Local Reads](../hadoop-hdfs/ShortCircuitLocalReads.html) and [Centralized Cache Management in HDFS](../hadoop-hdfs/CentralizedCacheManagement.html) * CRC32 checksum implementation @@ -117,7 +117,6 @@ NativeLibraryChecker is a tool to check whether native libraries are loaded corr Native library checking: hadoop: true /home/ozawa/hadoop/lib/native/libhadoop.so.1.0.0 zlib: true /lib/x86_64-linux-gnu/libz.so.1 - snappy: true /usr/lib/libsnappy.so.1 zstd: true /usr/lib/libzstd.so.1 lz4: true revision:99 bzip2: false diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm index 45c084bb543be..8d0a7d195a82f 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm +++ b/hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm @@ -206,7 +206,7 @@ The following instructions assume that 1. ~ 4. steps of [the above instructions] yarn.nodemanager.env-whitelist - JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME + JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ,HADOOP_MAPRED_HOME diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md b/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md index 678d56b123c0f..56a763ad08e88 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Superusers.md @@ -80,7 +80,7 @@ If more lax security is preferred, the wildcard value \* may be used to allow im * -The `hadoop.proxyuser.$superuser.hosts` accepts list of ip addresses, ip address ranges in CIDR format and/or host names. For example, by specifying as below, user named `super` accessing from hosts in the range `10.222.0.0-15` and `10.113.221.221` can impersonate `user1` and `user2`. +The `hadoop.proxyuser.$superuser.hosts` accepts list of ip addresses, ip address ranges in CIDR format and/or host names. For example, by specifying as below, user named `super` accessing from hosts in the range `10.222.0.0-10.222.255.255` and `10.113.221.221` can impersonate `user1` and `user2`. hadoop.proxyuser.super.hosts diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/abortable.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/abortable.md new file mode 100644 index 0000000000000..7e6ea01a8fe9b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/abortable.md @@ -0,0 +1,186 @@ + + + + + + + +# interface `org.apache.hadoop.fs.Abortable` + + + +Abort the active operation such that the output does not become +manifest. 
+ +Specifically, if supported on an [output stream](outputstream.html), +a successful `abort()` MUST guarantee that the stream will not be made visible in the `close()` +operation. + +```java + +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface Abortable { + + /** + * Abort the active operation without the output becoming visible. + * + * This is to provide ability to cancel the write on stream; once + * a stream is aborted, the write MUST NOT become visible. + * + * @throws UnsupportedOperationException if the operation is not supported. + * @return the result. + */ + AbortableResult abort(); + + /** + * Interface for the result of aborts; allows subclasses to extend + * (IOStatistics etc) or for future enhancements if ever needed. + */ + interface AbortableResult { + + /** + * Was the stream already closed/aborted? + * @return true if a close/abort operation had already + * taken place. + */ + boolean alreadyClosed(); + + /** + * Any exception caught during cleanup operations, + * exceptions whose raising/catching does not change + * the semantics of the abort. + * @return an exception or null. + */ + IOException anyCleanupException(); + } +} +``` + +## Method `abort()` + +Aborts the ongoing operation such that no output SHALL become visible +when the operation is completed. + +Unless and until other File System classes implement `Abortable`, the +interface is specified purely for output streams. + +## Method `abort()` on an output stream + +`Abortable.abort()` MUST only be supported on output streams +whose output is only made visible when `close()` is called, +for example. output streams returned by the S3A FileSystem. + +## Preconditions + +The stream MUST implement `Abortable` and `StreamCapabilities`. + +```python + if unsupported: + throw UnsupportedException + +if not isOpen(stream): + no-op + +StreamCapabilities.hasCapability("fs.capability.outputstream.abortable") == True + +``` + + +## Postconditions + +After `abort()` returns, the filesystem MUST be unchanged: + +``` +FS' = FS +``` + +A successful `abort()` operation MUST guarantee that +when the stream` close()` is invoked no output shall be manifest. + +* The stream MUST retry any remote calls needed to force the abort outcome. +* If any file was present at the destination path, it MUST remain unchanged. + +Strictly then: + +> if `Abortable.abort()` does not raise `UnsupportedOperationException` +> then returns, then it guarantees that the write SHALL NOT become visible +> and that any existing data in the filesystem at the destination path SHALL +> continue to be available. + + +1. Calls to `write()` methods MUST fail. +1. Calls to `flush()` MUST be no-ops (applications sometimes call this on closed streams) +1. Subsequent calls to `abort()` MUST be no-ops. +1. `close()` MUST NOT manifest the file, and MUST NOT raise an exception + +That is, the postconditions of `close()` becomes: + +``` +FS' = FS +``` + +### Cleanup + +* If temporary data is stored in the local filesystem or in the store's upload + infrastructure then this MAY be cleaned up; best-effort is expected here. + +* The stream SHOULD NOT retry cleanup operations; any failure there MUST be + caught and added to `AbortResult` + +#### Returned `AbortResult` + +The `AbortResult` value returned is primarily for testing and logging. + +`alreadyClosed()`: MUST return `true` if the write had already been aborted or closed; + +`anyCleanupException();`: SHOULD return any IOException raised during any optional +cleanup operations. 
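As a non-normative illustration of the intended calling pattern, the sketch below probes for the abort capability (specified in more detail further down) before aborting; it assumes that the wrapping `FSDataOutputStream` forwards both `hasCapability()` and the `Abortable` interface of the stream it wraps:

```java
import java.io.IOException;

import org.apache.hadoop.fs.Abortable;
import org.apache.hadoop.fs.FSDataOutputStream;

public final class AbortExample {
  private AbortExample() {
  }

  /**
   * Abort a pending write if the stream advertises support, otherwise just close it.
   * Purely illustrative; the cast to Abortable is an assumption of this sketch.
   */
  public static void abortOrClose(FSDataOutputStream out) throws IOException {
    if (out.hasCapability("fs.capability.outputstream.abortable")) {
      // A successful abort() guarantees that the following close() will not
      // manifest the file at the destination path.
      ((Abortable) out).abort();
      out.close();
    } else {
      // No abort support: closing the stream will make the output visible.
      out.close();
    }
  }
}
```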
+ + +### Thread safety and atomicity + +Output streams themselves aren't formally required to be thread safe, +but as applications do sometimes assume they are, this call MUST be thread safe. + +## Path/Stream capability "fs.capability.outputstream.abortable" + + +An application MUST be able to verify that a stream supports the `Abortable.abort()` +operation without actually calling it. This is done through the `StreamCapabilities` +interface. + +1. If a stream instance supports `Abortable` then it MUST return `true` +in the probe `hasCapability("fs.capability.outputstream.abortable")` + +1. If a stream instance does not support `Abortable` then it MUST return `false` +in the probe `hasCapability("fs.capability.outputstream.abortable")` + +That is: if a stream declares its support for the feature, a call to `abort()` +SHALL meet the defined semantics of the operation. + +FileSystem/FileContext implementations SHOULD declare support similarly, to +allow for applications to probe for the feature in the destination directory/path. + +If a filesystem supports `Abortable` under a path `P` then it SHOULD return `true` to +`PathCababilities.hasPathCapability(path, "fs.capability.outputstream.abortable")` +This is to allow applications to verify that the store supports the feature. + +If a filesystem does not support `Abortable` under a path `P` then it MUST +return `false` to +`PathCababilities.hasPathCapability(path, "fs.capability.outputstream.abortable")` + + + diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md index 665e328447d5b..a5a35df30c0b5 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md @@ -116,6 +116,36 @@ for both files and directories, MUST always return `true` to the `isEncrypted()` predicate. This can be done by setting the `encrypted` flag to true when creating the `FileStatus` instance. + +### `msync()` + +Synchronize metadata state of the client with the latest state of the metadata +service of the FileSystem. + +In highly available FileSystems standby service can be used as a read-only +metadata replica. This call is essential to guarantee consistency of +reads from the standby replica and to avoid stale reads. + +It is currently only implemented for HDFS and others will just throw +`UnsupportedOperationException`. + +#### Preconditions + + +#### Postconditions + +This call internally records the state of the metadata service at the time of +the call. This guarantees consistency of subsequent reads from any metadata +replica. It assures the client will never access the state of the metadata that +preceded the recorded state. + +#### HDFS implementation notes + +HDFS supports `msync()` in HA mode by calling the Active NameNode and requesting +its latest journal transaction ID. For more details see HDFS documentation +[Consistent Reads from HDFS Observer NameNode](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/ObserverNameNode.html) + + ### `Path getHomeDirectory()` The function `getHomeDirectory` returns the home directory for the FileSystem @@ -294,6 +324,24 @@ any optimizations. The atomicity and consistency constraints are as for `listStatus(Path, PathFilter)`. +### `RemoteIterator listStatusIterator(Path p)` + +Return an iterator enumerating the `FileStatus` entries under +a path. 
This is similar to `listStatus(Path)` except the fact that +rather than returning an entire list, an iterator is returned. +The result is exactly the same as `listStatus(Path)`, provided no other +caller updates the directory during the listing. Having said that, this does +not guarantee atomicity if other callers are adding/deleting the files +inside the directory while listing is being performed. Different filesystems +may provide a more efficient implementation, for example S3A does the +listing in pages and fetches the next pages asynchronously while a +page is getting processed. + +Note that now since the initial listing is async, bucket/path existence +exception may show up later during next() call. + +Callers should prefer using listStatusIterator over listStatus as it +is incremental in nature. ### `FileStatus[] listStatus(Path[] paths)` @@ -616,11 +664,15 @@ For instance, HDFS may raise an `InvalidPathException`. result = FSDataOutputStream -The updated (valid) FileSystem must contains all the parent directories of the path, as created by `mkdirs(parent(p))`. +A zero byte file MUST exist at the end of the specified path, visible to all. + +The updated (valid) FileSystem MUST contain all the parent directories of the path, as created by `mkdirs(parent(p))`. The result is `FSDataOutputStream`, which through its operations may generate new filesystem states with updated values of `FS.Files[p]` +The behavior of the returned stream is covered in [Output](outputstream.html). + #### Implementation Notes * Some implementations split the create into a check for the file existing @@ -629,10 +681,18 @@ The result is `FSDataOutputStream`, which through its operations may generate ne clients creating files with `overwrite==true` to fail if the file is created by another client between the two tests. -* S3A, Swift and potentially other Object Stores do not currently change the FS state +* S3A, Swift and potentially other Object Stores do not currently change the `FS` state until the output stream `close()` operation is completed. -This MAY be a bug, as it allows >1 client to create a file with `overwrite==false`, - and potentially confuse file/directory logic +This is a significant difference between the behavior of object stores +and that of filesystems, as it allows >1 client to create a file with `overwrite=false`, +and potentially confuse file/directory logic. In particular, using `create()` to acquire +an exclusive lock on a file (whoever creates the file without an error is considered +the holder of the lock) may not not a safe algorithm to use when working with object stores. + +* Object stores may create an empty file as a marker when a file is created. +However, object stores with `overwrite=true` semantics may not implement this atomically, +so creating files with `overwrite=false` cannot be used as an implicit exclusion +mechanism between processes. * The Local FileSystem raises a `FileNotFoundException` when trying to create a file over a directory, hence it is listed as an exception that MAY be raised when @@ -644,6 +704,8 @@ this precondition fails. Make a `FSDataOutputStreamBuilder` to specify the parameters to create a file. +The behavior of the returned stream is covered in [Output](outputstream.html). 
+ #### Implementation Notes `createFile(p)` returns a `FSDataOutputStreamBuilder` only and does not make @@ -669,17 +731,21 @@ Implementations without a compliant call SHOULD throw `UnsupportedOperationExcep #### Postconditions - FS + FS' = FS result = FSDataOutputStream Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]` by appending data to the existing list. +The behavior of the returned stream is covered in [Output](outputstream.html). + ### `FSDataOutputStreamBuilder appendFile(Path p)` Make a `FSDataOutputStreamBuilder` to specify the parameters to append to an existing file. +The behavior of the returned stream is covered in [Output](outputstream.html). + #### Implementation Notes `appendFile(p)` returns a `FSDataOutputStreamBuilder` only and does not make @@ -1098,7 +1164,7 @@ deletion, preventing the stores' use as drop-in replacements for HDFS. ### `boolean rename(Path src, Path d)` -In terms of its specification, `rename()` is one of the most complex operations within a filesystem . +In terms of its specification, `rename()` is one of the most complex operations within a filesystem. In terms of its implementation, it is the one with the most ambiguity regarding when to return false versus raising an exception. @@ -1107,7 +1173,7 @@ Rename includes the calculation of the destination path. If the destination exists and is a directory, the final destination of the rename becomes the destination + the filename of the source path. - let dest = if (isDir(FS, src) and d != src) : + let dest = if (isDir(FS, d) and d != src) : d + [filename(src)] else : d @@ -1121,7 +1187,6 @@ Source `src` must exist: exists(FS, src) else raise FileNotFoundException - `dest` cannot be a descendant of `src`: if isDescendant(FS, src, dest) : raise IOException @@ -1186,10 +1251,10 @@ If `src` is a directory then all its children will then exist under `dest`, whil `src` and its descendants will no longer exist. The names of the paths under `dest` will match those under `src`, as will the contents: - if isDir(FS, src) isDir(FS, dest) and src != dest : + if isDir(FS, src) and isDir(FS, dest) and src != dest : FS' where: not exists(FS', src) - and dest in FS'.Directories] + and dest in FS'.Directories and forall c in descendants(FS, src) : not exists(FS', c)) and forall c in descendants(FS, src) where isDir(FS, c): @@ -1217,6 +1282,15 @@ that the parent directories of the destination also exist. exists(FS', parent(dest)) +*S3A FileSystem* + +The outcome is as a normal rename, with the additional (implicit) feature that +the parent directories of the destination then exist: +`exists(FS', parent(dest))` + +There is a check for and rejection if the `parent(dest)` is a file, but +no checks for any other ancestors. + *Other Filesystems (including Swift) * Other filesystems strictly reject the operation, raising a `FileNotFoundException` diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md index df538ee6cf96b..a4aa136033a0c 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md @@ -32,9 +32,12 @@ HDFS as these are commonly expected by Hadoop client applications. 1. [Notation](notation.html) 1. [Model](model.html) 1. [FileSystem class](filesystem.html) +1. [OutputStream, Syncable and `StreamCapabilities`](outputstream.html) +1. [Abortable](abortable.html) 1. 
[FSDataInputStream class](fsdatainputstream.html) 1. [PathCapabilities interface](pathcapabilities.html) 1. [FSDataOutputStreamBuilder class](fsdataoutputstreambuilder.html) 2. [Testing with the Filesystem specification](testing.html) 2. [Extending the specification and its tests](extending.html) 1. [Uploading a file using Multiple Parts](multipartuploader.html) +1. [IOStatistics](iostatistics.html) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/iostatistics.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/iostatistics.md new file mode 100644 index 0000000000000..bd77dc7e0f8a7 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/iostatistics.md @@ -0,0 +1,432 @@ + + +# Statistic collection with the IOStatistics API + +```java +@InterfaceAudience.Public +@InterfaceStability.Unstable +``` + +The `IOStatistics` API is intended to provide statistics on individual IO +classes -such as input and output streams, *in a standard way which +applications can query* + +Many filesystem-related classes have implemented statistics gathering +and provided private/unstable ways to query this, but as they were +not common across implementations it was unsafe for applications +to reference these values. Example: `S3AInputStream` and its statistics +API. This is used in internal tests, but cannot be used downstream in +applications such as Apache Hive or Apache HBase. + +The IOStatistics API is intended to + +1. Be instance specific:, rather than shared across multiple instances + of a class, or thread local. +1. Be public and stable enough to be used by applications. +1. Be easy to use in applications written in Java, Scala, and, via libhdfs, C/C++ +1. Have foundational interfaces and classes in the `hadoop-common` JAR. + +## Core Model + +Any class *may* implement `IOStatisticsSource` in order to +provide statistics. + +Wrapper I/O Classes such as `FSDataInputStream` anc `FSDataOutputStream` *should* +implement the interface and forward it to the wrapped class, if they also +implement it -and return `null` if they do not. + +`IOStatisticsSource` implementations `getIOStatistics()` return an +instance of `IOStatistics` enumerating the statistics of that specific +instance. + +The `IOStatistics` Interface exports five kinds of statistic: + + +| Category | Type | Description | +|------|------|-------------| +| `counter` | `long` | a counter which may increase in value; SHOULD BE >= 0 | +| `gauge` | `long` | an arbitrary value which can down as well as up; SHOULD BE >= 0 | +| `minimum` | `long` | an minimum value; MAY BE negative | +| `maximum` | `long` | a maximum value; MAY BE negative | +| `meanStatistic` | `MeanStatistic` | an arithmetic mean and sample size; mean MAY BE negative | + +Four are simple `long` values, with the variations how they are likely to +change and how they are aggregated. + + +#### Aggregation of Statistic Values + +For the different statistic category, the result of `aggregate(x, y)` is + +| Category | Aggregation | +|------------------|-------------| +| `counter` | `max(0, x) + max(0, y)` | +| `gauge` | `max(0, x) + max(0, y)` | +| `minimum` | `min(x, y)` | +| `maximum` | `max(x, y)` | +| `meanStatistic` | calculation of the mean of `x` and `y` ) | + + +#### Class `MeanStatistic` + +## package `org.apache.hadoop.fs.statistics` + +This package contains the public statistics APIs intended +for use by applications. + + + + + +`MeanStatistic` is a tuple of `(mean, samples)` to support aggregation. 
+ +A `MeanStatistic` with a sample of `0` is considered an empty statistic. + +All `MeanStatistic` instances where `sample = 0` are considered equal, +irrespective of the `mean` value. + +Algorithm to calculate the mean : + +```python +if x.samples = 0: + y +else if y.samples = 0 : + x +else: + samples' = x.samples + y.samples + mean' = (x.mean * x.samples) + (y.mean * y.samples) / samples' + (samples', mean') +``` + +Implicitly, this means that if both samples are empty, then the aggregate value is also empty. + +```java +public final class MeanStatistic implements Serializable, Cloneable { + /** + * Arithmetic mean. + */ + private double mean; + + /** + * Number of samples used to calculate + * the mean. + */ + private long samples; + + /** + * Get the mean value. + * @return the mean + */ + public double getMean() { + return mean; + } + + /** + * Get the sample count. + * @return the sample count; 0 means empty + */ + public long getSamples() { + return samples; + } + + /** + * Is a statistic empty? + * @return true if the sample count is 0 + */ + public boolean isEmpty() { + return samples == 0; + } + /** + * Add another mean statistic to create a new statistic. + * When adding two statistics, if either is empty then + * a copy of the non-empty statistic is returned. + * If both are empty then a new empty statistic is returned. + * + * @param other other value + * @return the aggregate mean + */ + public MeanStatistic add(final MeanStatistic other) { + /* Implementation elided. */ + } + @Override + public int hashCode() { + return Objects.hash(mean, samples); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { return true; } + if (o == null || getClass() != o.getClass()) { return false; } + MeanStatistic that = (MeanStatistic) o; + if (this.isEmpty()) { + return that.isEmpty(); + } + return Double.compare(that.mean, mean) == 0 && + samples == that.samples; + } + + @Override + public MeanStatistic clone() { + return new MeanStatistic(this); + } + + public MeanStatistic copy() { + return new MeanStatistic(this); + } + +} +``` + + + + + +### class `org.apache.hadoop.fs.statistics.IOStatisticsSource` + +```java + +/** + * A source of IO statistics. + * These statistics MUST be instance specific, not thread local. + */ +@InterfaceStability.Unstable +public interface IOStatisticsSource { + + /** + * Return a statistics instance. + * It is not a requirement that the same instance is returned every time. + * {@link IOStatisticsSource}. + * If the object implementing this is Closeable, this method + * may return null if invoked on a closed object, even if + * it returns a valid instance when called earlier. + * @return an IOStatistics instance or null + */ + IOStatistics getIOStatistics(); +} +``` + +This is the interface which an object instance MUST implement if they are a source of +IOStatistics information. + +#### Invariants + +The result of `getIOStatistics()` must be one of + +* `null` +* an immutable `IOStatistics` for which each map of entries is +an empty map. +* an instance of an `IOStatistics` whose statistics MUST BE unique to that +instance of the class implementing `IOStatisticsSource`. + +Less formally: if the statistics maps returned are non-empty, all the statistics +must be collected from the current instance, and not from any other instances, the way +some of the `FileSystem` statistics are collected. + + +The result of `getIOStatistics()`, if non-null, MAY be a different instance +on every invocation. 
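
Less formally still, as a worked example: the sketch below shows one way an
application might probe an input stream for a statistic. It is illustrative
only; the counter key passed in is an assumption of the caller, and the names
actually published by a stream should be taken from `StreamStatisticNames`.

```java
import java.io.InputStream;
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.IOStatisticsSource;

public final class StatisticsProbe {

  /**
   * Return the value of a named counter of a stream, or null if the stream
   * does not publish IOStatistics or does not have that counter.
   * Example (key name illustrative): counterOrNull(in, "stream_read_bytes").
   */
  public static Long counterOrNull(InputStream in, String key) {
    if (in instanceof IOStatisticsSource) {
      // getIOStatistics() may legitimately return null, e.g. once closed.
      IOStatistics stats = ((IOStatisticsSource) in).getIOStatistics();
      if (stats != null) {
        return stats.counters().get(key);
      }
    }
    return null;    // stream does not expose statistics
  }
}
```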
+ + + + + + +### class `org.apache.hadoop.fs.statistics.IOStatistics` + +These are per-instance statistics provided by an object which +implements `IOStatisticsSource`. + +```java +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface IOStatistics { + + /** + * Map of counters. + * @return the current map of counters. + */ + Map counters(); + + /** + * Map of gauges. + * @return the current map of gauges. + */ + Map gauges(); + + /** + * Map of minumums. + * @return the current map of minumums. + */ + Map minumums(); + + /** + * Map of maximums. + * @return the current map of maximums. + */ + Map maximums(); + + /** + * Map of meanStatistics. + * @return the current map of MeanStatistic statistics. + */ + Map meanStatistics(); + +} +``` + +### Statistic Naming + +The naming policy of statistics is designed to be readable, shareable +and ideally consistent across `IOStatisticSource` implementations. + +* Characters in key names MUST match the regular expression + `[a-z|0-9|_]` with the exception of the first character, which + MUST be in the range `[a-z]`. Thus the full regular expression + for a valid statistic name is: + + [a-z][a-z|0-9|_]+ + +* Where possible, the names of statistics SHOULD be those defined + with common names. + + org.apache.hadoop.fs.statistics.StreamStatisticNames + org.apache.hadoop.fs.statistics.StoreStatisticNames + + Note 1.: these are evolving; for clients to safely reference their + statistics by name they SHOULD be copied to the application. + (i.e. for an application compiled hadoop 3.4.2 to link against hadoop 3.4.1, + copy the strings). + + Note 2: keys defined in these classes SHALL NOT be removed + from subsequent Hadoop releases. + +* A common statistic name MUST NOT be used to report any other statistic and + MUST use the pre-defined unit of measurement. + +* A statistic name in one of the maps SHOULD NOT be re-used in another map. + This aids diagnostics of logged statistics. + +### Statistic Maps + +For each map of statistics returned: + +* The operations to add/remove entries are unsupported: the map returned + MAY be mutable by the source of statistics. + +* The map MAY be empty. + +* The map keys each represent a measured statistic. + +* The set of keys in a map SHOULD remain unchanged, and MUST NOT remove keys. + +* The statistics SHOULD be dynamic: every lookup of an entry SHOULD + return the latest value. + +* The values MAY change across invocations of `Map.values()` and `Map.entries()` + +* The update MAY be in the `iterable()` calls of the iterators returned, + or MAY be in the actual `iterable.next()` operation. That is: there is + no guarantee as to when the evaluation takes place. + +* The returned `Map.Entry` instances MUST return the same value on + repeated `getValue()` calls. (i.e once you have the entry, it is immutable). + +* Queries of statistics SHOULD be fast and non-blocking to the extent + that if invoked during a long operation, they will prioritize + returning fast over most timely values. + +* The statistics MAY lag; especially for statistics collected in separate + operations (e.g stream IO statistics as provided by a filesystem + instance). + +* Statistics which represent time SHOULD use milliseconds as their unit. + +* Statistics which represent time and use a different unit MUST document + the unit used. + +### Thread Model + +1. An instance of `IOStatistics` can be shared across threads; + +1. Read access to the supplied statistics maps MUST be thread safe. + +1. 
Iterators returned from the maps MUST NOT be shared across threads. + +1. The statistics collected MUST include all operations which took + place across all threads performing work for the monitored object. + +1. The statistics reported MUST NOT be local to the active thread. + +This is different from the `FileSystem.Statistics` behavior where per-thread statistics +are collected and reported. + +That mechanism supports collecting limited read/write statistics for different +worker threads sharing the same FS instance, but as the collection is thread local, +it invariably under-reports IO performed in other threads on behalf of a worker thread. + + +## Statisic Snapshot + +A snapshot of the current statistic values MAY be obtained by calling +`IOStatisticsSupport.snapshotIOStatistics()` + +```java + public static X + snapshotIOStatistics(IOStatistics statistics) +``` + +This snapshot is serializable through Java serialization and through +Jackson to/from JSON. + +## Helper Classes + + +### class `org.apache.hadoop.fs.statistics.IOStatisticsSupport` + +This provides helper methods to work with IOStatistics sources and instances. + +Consult the javadocs for its operations. + +### class `org.apache.hadoop.fs.statistics.IOStatisticsLogging` + +Support for efficiently logging `IOStatistics`/`IOStatisticsSource` +instances. + +These are intended for assisting logging, including only enumerating the +state of an `IOStatistics` instance when the log level needs it. + +```java +LOG.info("IOStatistics after upload: {}", demandStringify(iostats)); + +// or even better, as it results in only a single object creations +Object latest = demandStringify(iostats); +LOG.info("IOStatistics : {}", latest); +/* do some work. */ +LOG.info("IOStatistics : {}", latest); + +``` + +## Package `org.apache.hadoop.fs.statistics.impl` + +This contains implementation classes to support providing statistics to applications. + +These MUST NOT BE used by applications. If a feature is needed from this package then +the provisioning of a public implementation MAY BE raised via the Hadoop development +channels. + +These MAY be used by those implementations of the Hadoop `FileSystem`, `AbstractFileSystem` +and related classes which are not in the hadoop source tree. Implementors MUST BE +aware that the implementation this code is unstable and may change across +minor point releases of Hadoop. diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md index 629c0c418fdf2..906c592eea09d 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/multipartuploader.md @@ -14,14 +14,14 @@ - + -# class `org.apache.hadoop.fs.MultipartUploader` +# interface `org.apache.hadoop.fs.MultipartUploader` -The abstract `MultipartUploader` class is the original class to upload a file +The `MultipartUploader` can upload a file using multiple parts to Hadoop-supported filesystems. The benefits of a multipart upload is that the file can be uploaded from multiple clients or processes in parallel and the results will not be visible to other clients until @@ -30,13 +30,12 @@ the `complete` function is called. When implemented by an object store, uploaded data may incur storage charges, even before it is visible in the filesystems. 
Users of this API must be diligent and always perform best-effort attempts to complete or abort the upload. +The `abortUploadsUnderPath(path)` operation can help here. ## Invariants -All the requirements of a valid MultipartUploader are considered implicit +All the requirements of a valid `MultipartUploader` are considered implicit econditions and postconditions: -all operations on a valid MultipartUploader MUST result in a new -MultipartUploader that is also valid. The operations of a single multipart upload may take place across different instance of a multipart uploader, across different processes and hosts. @@ -45,16 +44,28 @@ It is therefore a requirement that: 1. All state needed to upload a part, complete an upload or abort an upload must be contained within or retrievable from an upload handle. -1. If an upload handle is marshalled to another process, then, if the -receiving process has the correct permissions, it may participate in the -upload, by uploading one or more parts, by completing an upload, and/or by -aborting the upload. +1. That handle MUST be serializable; it MUST be deserializable to different +processes executing the exact same version of Hadoop. + +1. different hosts/processes MAY upload different parts, sequentially or +simultaneously. The order in which they are uploaded to the filesystem +MUST NOT constrain the order in which the data is stored in the final file. + +1. An upload MAY be completed on a different instance than any which uploaded +parts. + +1. The output of an upload MUST NOT be visible at the final destination +until the upload may complete. + +1. It is not an error if a single multipart uploader instance initiates +or completes multiple uploads files to the same destination sequentially, +irrespective of whether or not the store supports concurrent uploads. ## Concurrency Multiple processes may upload parts of a multipart upload simultaneously. -If a call is made to `initialize(path)` to a destination where an active +If a call is made to `startUpload(path)` to a destination where an active upload is in progress, implementations MUST perform one of the two operations. * Reject the call as a duplicate. @@ -70,9 +81,17 @@ the in-progress upload, if it has not completed, must not be included in the final file, in whole or in part. Implementations SHOULD raise an error in the `putPart()` operation. +# Serialization Compatibility + +Users MUST NOT expect that serialized PathHandle versions are compatible across +* different multipart uploader implementations. +* different versions of the same implementation. + +That is: all clients MUST use the exact same version of Hadoop. + ## Model -A File System which supports Multipart Uploads extends the existing model +A FileSystem/FileContext which supports Multipart Uploads extends the existing model `(Directories, Files, Symlinks)` to one of `(Directories, Files, Symlinks, Uploads)` `Uploads` of type `Map[UploadHandle -> Map[PartHandle -> UploadPart]`. @@ -112,11 +131,40 @@ However, if Part Handles are rapidly recycled, there is a risk that the nominall idempotent operation `abort(FS, uploadHandle)` could unintentionally cancel a successor operation which used the same Upload Handle. +## Asynchronous API + +All operations return `CompletableFuture<>` types which must be +subsequently evaluated to get their return values. + +1. The execution of the operation MAY be a blocking operation in on the call thread. +1. 
If not, it SHALL be executed in a separate thread and MUST complete by the time the +future evaluation returns. +1. Some/All preconditions MAY be evaluated at the time of initial invocation, +1. All those which are not evaluated at that time, MUST Be evaluated during the execution +of the future. + + +What this means is that when an implementation interacts with a fast file system/store all preconditions +including the existence of files MAY be evaluated early, whereas and implementation interacting with a +remote object store whose probes are slow MAY verify preconditions in the asynchronous phase -especially +those which interact with the remote store. + +Java CompletableFutures do not work well with checked exceptions. The Hadoop codease is still evolving the +details of the exception handling here, as more use is made of the asynchronous APIs. Assume that any +precondition failure which declares that an `IOException` MUST be raised may have that operation wrapped in a +`RuntimeException` of some form if evaluated in the future; this also holds for any other `IOException` +raised during the operations. + +### `close()` + +Applications MUST call `close()` after using an uploader; this is so it may release other +objects, update statistics, etc. + ## State Changing Operations -### `UploadHandle initialize(Path path)` +### `CompletableFuture startUpload(Path)` -Initialized a Multipart Upload, returning an upload handle for use in +Starts a Multipart Upload, ultimately returning an `UploadHandle` for use in subsequent operations. #### Preconditions @@ -128,17 +176,15 @@ if exists(FS, path) and not isFile(FS, path) raise PathIsDirectoryException, IOE ``` If a filesystem does not support concurrent uploads to a destination, -then the following precondition is added +then the following precondition is added: ```python if path in values(FS.Uploads) raise PathExistsException, IOException - ``` - #### Postconditions -The outcome of this operation is that the filesystem state is updated with a new +Once the initialization operation completes, the filesystem state is updated with a new active upload, with a new handle, this handle being returned to the caller. ```python @@ -147,9 +193,10 @@ FS' = FS where FS'.Uploads(handle') == {} result = handle' ``` -### `PartHandle putPart(Path path, InputStream inputStream, int partNumber, UploadHandle uploadHandle, long lengthInBytes)` +### `CompletableFuture putPart(UploadHandle uploadHandle, int partNumber, Path filePath, InputStream inputStream, long lengthInBytes)` -Upload a part for the multipart upload. +Upload a part for the specific multipart upload, eventually being returned an opaque part handle +represting this part of the specified upload. #### Preconditions @@ -170,10 +217,12 @@ FS' = FS where FS'.uploads(uploadHandle).parts(partHandle') == data' result = partHandle' ``` -The data is stored in the filesystem, pending completion. +The data is stored in the filesystem, pending completion. It MUST NOT be visible at the destination path. +It MAY be visible in a temporary path somewhere in the file system; +This is implementation-specific and MUST NOT be relied upon. -### `PathHandle complete(Path path, Map parts, UploadHandle multipartUploadId)` +### ` CompletableFuture complete(UploadHandle uploadId, Path filePath, Map handles)` Complete the multipart upload. 
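
Before the formal pre- and postconditions of `complete()`, here is a
non-normative sketch of a full upload lifecycle tying together
`startUpload()`, `putPart()`, `complete()` and `close()`. How the
`MultipartUploader` instance is obtained and configured is out of scope here,
and the blocking `get()` calls are used purely to keep the sketch short.

```java
import java.io.ByteArrayInputStream;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public final class TwoPartUpload {

  /** Upload two parts sequentially, then complete and close the uploader. */
  public static PathHandle upload(MultipartUploader uploader, Path dest,
      byte[] part1, byte[] part2) throws Exception {
    UploadHandle upload = uploader.startUpload(dest).get();

    // Part numbers are 1-based; the map keys define the order in the final file.
    Map<Integer, PartHandle> parts = new TreeMap<>();
    parts.put(1, uploader.putPart(upload, 1, dest,
        new ByteArrayInputStream(part1), part1.length).get());
    parts.put(2, uploader.putPart(upload, 2, dest,
        new ByteArrayInputStream(part2), part2.length).get());

    // Only once complete() resolves is the aggregated file visible at dest.
    PathHandle result = uploader.complete(upload, dest, parts).get();
    uploader.close();
    return result;
  }
}
```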
@@ -188,11 +237,23 @@ uploadHandle in keys(FS.Uploads) else raise FileNotFoundException FS.Uploads(uploadHandle).path == path if exists(FS, path) and not isFile(FS, path) raise PathIsDirectoryException, IOException parts.size() > 0 +forall k in keys(parts): k > 0 +forall k in keys(parts): + not exists(k2 in keys(parts)) where (parts[k] == parts[k2]) ``` -If there are handles in the MPU which aren't included in the map, then the omitted -parts will not be a part of the resulting file. It is up to the implementation -of the MultipartUploader to make sure the leftover parts are cleaned up. +All keys MUST be greater than zero, and there MUST not be any duplicate +references to the same parthandle. +These validations MAY be performed at any point during the operation. +After a failure, there is no guarantee that a `complete()` call for this +upload with a valid map of paths will complete. +Callers SHOULD invoke `abort()` after any such failure to ensure cleanup. + +if `putPart()` operations For this `uploadHandle` were performed But whose +`PathHandle` Handles were not included in this request -the omitted +parts SHALL NOT be a part of the resulting file. + +The MultipartUploader MUST clean up any such outstanding entries. In the case of backing stores that support directories (local filesystem, HDFS, etc), if, at the point of completion, there is now a directory at the @@ -206,14 +267,14 @@ exists(FS', path') and result = PathHandle(path') FS' = FS where FS.Files(path) == UploadData' and not uploadHandle in keys(FS'.uploads) ``` -The PathHandle is returned by the complete operation so subsequent operations +The `PathHandle` is returned by the complete operation so subsequent operations will be able to identify that the data has not changed in the meantime. The order of parts in the uploaded by file is that of the natural order of -parts: part 1 is ahead of part 2, etc. +parts in the map: part 1 is ahead of part 2, etc. -### `void abort(Path path, UploadHandle multipartUploadId)` +### `CompletableFuture abort(UploadHandle uploadId, Path filePath)` Abort a multipart upload. The handle becomes invalid and not subject to reuse. @@ -233,3 +294,23 @@ FS' = FS where not uploadHandle in keys(FS'.uploads) ``` A subsequent call to `abort()` with the same handle will fail, unless the handle has been recycled. + +### `CompletableFuture abortUploadsUnderPath(Path path)` + +Perform a best-effort cleanup of all uploads under a path. + +returns a future which resolves to. + + -1 if unsuppported + >= 0 if supported + +Because it is best effort a strict postcondition isn't possible. +The ideal postcondition is all uploads under the path are aborted, +and the count is the number of uploads aborted: + +```python +FS'.uploads forall upload in FS.uploads: + not isDescendant(FS, path, upload.path) +return len(forall upload in FS.uploads: + isDescendant(FS, path, upload.path)) +``` diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/outputstream.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/outputstream.md new file mode 100644 index 0000000000000..8d0d4c4354f0b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/outputstream.md @@ -0,0 +1,1016 @@ + + + + +# Output: `OutputStream`, `Syncable` and `StreamCapabilities` + +## Introduction + +This document covers the Output Streams within the context of the +[Hadoop File System Specification](index.html). 
+ +It uses the filesystem model defined in [A Model of a Hadoop Filesystem](model.html) +with the notation defined in [notation](Notation.md). + +The target audiences are: +1. Users of the APIs. While `java.io.OutputStream` is a standard interfaces, +this document clarifies how it is implemented in HDFS and elsewhere. +The Hadoop-specific interfaces `Syncable` and `StreamCapabilities` are new; +`Syncable` is notable in offering durability and visibility guarantees which +exceed that of `OutputStream`. +1. Implementors of File Systems and clients. + +## How data is written to a filesystem + +The core mechanism to write data to files through the Hadoop FileSystem APIs +is through `OutputStream` subclasses obtained through calls to +`FileSystem.create()`, `FileSystem.append()`, +or `FSDataOutputStreamBuilder.build()`. + +These all return instances of `FSDataOutputStream`, through which data +can be written through various `write()` methods. +After a stream's `close()` method is called, all data written to the +stream MUST BE persisted to the fileysystem and visible to oll other +clients attempting to read data from that path via `FileSystem.open()`. + +As well as operations to write the data, Hadoop's `OutputStream` implementations +provide methods to flush buffered data back to the filesystem, +so as to ensure that the data is reliably persisted and/or visible +to other callers. This is done via the `Syncable` interface. It was +originally intended that the presence of this interface could be interpreted +as a guarantee that the stream supported its methods. However, this has proven +impossible to guarantee as the static nature of the interface is incompatible +with filesystems whose syncability semantics may vary on a store/path basis. +As an example, erasure coded files in HDFS do not support the Sync operations, +even though they are implemented as subclass of an output stream which is `Syncable`. + +A new interface: `StreamCapabilities`. This allows callers +to probe the exact capabilities of a stream, even transitively +through a chain of streams. + +## Output Stream Model + +For this specification, an output stream can be viewed as a list of bytes +stored in the client; `hsync()` and `hflush()` are operations the actions +which propagate the data to be visible to other readers of the file and/or +made durable. + +```python +buffer: List[byte] +``` + +A flag, `open` tracks whether the stream is open: after the stream +is closed no more data may be written to it: + +```python +open: bool +buffer: List[byte] +``` + +The destination path of the stream, `path`, can be tracked to form a triple +`path, open, buffer` + +```python +Stream = (path: Path, open: Boolean, buffer: byte[]) +``` + +#### Visibility of Flushed Data + +(Immediately) after `Syncable` operations which flush data to the filesystem, +the data at the stream's destination path MUST match that of +`buffer`. That is, the following condition MUST hold: + +```python +FS'.Files(path) == buffer +``` + +Any client reading the data at the path MUST see the new data. +The `Syncable` operations differ in their durability +guarantees, not visibility of data. 
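
The following non-normative fragment illustrates the model, assuming the
target filesystem actually supports `Syncable`: data written and then flushed
with `hflush()` must be visible to a reader which opens the path afterwards,
even though the writer's stream is still open. The path and payload are
arbitrary.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class FlushVisibilityDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/flush-visibility-demo.txt");   // illustrative path
    byte[] payload = "line 1\n".getBytes(StandardCharsets.UTF_8);

    FSDataOutputStream out = fs.create(path, true);
    out.write(payload);
    out.hflush();                 // new readers must now see the payload

    try (FSDataInputStream in = fs.open(path)) {
      byte[] buf = new byte[payload.length];
      in.readFully(buf);          // reads the flushed bytes while `out` is still open
    }

    out.close();                  // persists any remaining buffered data
  }
}
```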
+ +### State of Stream and File System after `Filesystem.create()` + +The output stream returned by a `FileSystem.create(path)` or +`FileSystem.createFile(path).build()` within a filesystem `FS`, +can be modeled as a triple containing an empty array of no data: + +```python +Stream' = (path, true, []) +``` + +The filesystem `FS'` MUST contain a 0-byte file at the path: + +```python +FS' = FS where data(FS', path) == [] +``` + +Thus, the initial state of `Stream'.buffer` is implicitly +consistent with the data at the filesystem. + + +*Object Stores*: see caveats in the "Object Stores" section below. + +### State of Stream and File System after `Filesystem.append()` + +The output stream returned from a call of + `FileSystem.append(path, buffersize, progress)` within a filesystem `FS`, +can be modelled as a stream whose `buffer` is intialized to that of +the original file: + +```python +Stream' = (path, true, data(FS, path)) +``` + +#### Persisting data + +When the stream writes data back to its store, be it in any +supported flush operation, in the `close()` operation, or at any other +time the stream chooses to do so, the contents of the file +are replaced with the current buffer + +```python +Stream' = (path, true, buffer) +FS' = FS where data(FS', path) == buffer +``` + +After a call to `close()`, the stream is closed for all operations other +than `close()`; they MAY fail with `IOException` or `RuntimeException`. + +```python +Stream' = (path, false, []) +``` + +The `close()` operation MUST be idempotent with the sole attempt to write the +data made in the first invocation. + +1. If `close()` succeeds, subsequent calls are no-ops. +1. If `close()` fails, again, subsequent calls are no-ops. They MAY rethrow +the previous exception, but they MUST NOT retry the write. + + + + + +##
    Class `FSDataOutputStream` + +```java +public class FSDataOutputStream + extends DataOutputStream + implements Syncable, CanSetDropBehind, StreamCapabilities { + // ... +} +``` + +The `FileSystem.create()`, `FileSystem.append()` and +`FSDataOutputStreamBuilder.build()` calls return an instance +of a class `FSDataOutputStream`, a subclass of `java.io.OutputStream`. + +The base class wraps an `OutputStream` instance, one which may implement `Syncable`, +`CanSetDropBehind` and `StreamCapabilities`. + +This document covers the requirements of such implementations. + +HDFS's `FileSystem` implementation, `DistributedFileSystem`, returns an instance +of `HdfsDataOutputStream`. This implementation has at least two behaviors +which are not explicitly declared by the base Java implmentation + +1. Writes are synchronized: more than one thread can write to the same +output stream. This is a use pattern which HBase relies on. + +1. `OutputStream.flush()` is a no-op when the file is closed. Apache Druid +has made such a call on this in the past +[HADOOP-14346](https://issues.apache.org/jira/browse/HADOOP-14346). + + +As the HDFS implementation is considered the de-facto specification of +the FileSystem APIs, the fact that `write()` is thread-safe is significant. + +For compatibility, not only SHOULD other FS clients be thread-safe, +but new HDFS features, such as encryption and Erasure Coding SHOULD also +implement consistent behavior with the core HDFS output stream. + +Put differently: + +*It isn't enough for Output Streams to implement the core semantics +of `java.io.OutputStream`: they need to implement the extra semantics +of `HdfsDataOutputStream`, especially for HBase to work correctly.* + +The concurrent `write()` call is the most significant tightening of +the Java specification. + +## Class `java.io.OutputStream` + +A Java `OutputStream` allows applications to write a sequence of bytes to a destination. +In a Hadoop filesystem, that destination is the data under a path in the filesystem. + +```java +public abstract class OutputStream implements Closeable, Flushable { + public abstract void write(int b) throws IOException; + public void write(byte b[]) throws IOException; + public void write(byte b[], int off, int len) throws IOException; + public void flush() throws IOException; + public void close() throws IOException; +} +``` +### `write(Stream, data)` + +Writes a byte of data to the stream. + +#### Preconditions + +```python +Stream.open else raise ClosedChannelException, PathIOException, IOException +``` + +The exception `java.nio.channels.ClosedChannelExceptionn` is +raised in the HDFS output streams when trying to write to a closed file. +This exception does not include the destination path; and +`Exception.getMessage()` is `null`. It is therefore of limited value in stack +traces. Implementors may wish to raise exceptions with more detail, such +as a `PathIOException`. + + +#### Postconditions + +The buffer has the lower 8 bits of the data argument appended to it. + +```python +Stream'.buffer = Stream.buffer + [data & 0xff] +``` + +There may be an explicit limit on the size of cached data, or an implicit +limit based by the available capacity of the destination filesystem. +When a limit is reached, `write()` SHOULD fail with an `IOException`. 
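
A short, non-normative illustration of this postcondition, where `out` is an
open output stream: only the lower eight bits of the argument are appended, so
arguments outside the range 0..255 are silently truncated.

```java
// Each call appends exactly one byte to the stream's buffer.
out.write(0x41);     // appends 0x41 ('A')
out.write(0x1FF);    // appends 0xFF  (0x1FF & 0xff)
out.write(-1);       // appends 0xFF  ((-1)  & 0xff)
```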
+ +### `write(Stream, byte[] data, int offset, int len)` + + +#### Preconditions + +The preconditions are all defined in `OutputStream.write()` + +```python +Stream.open else raise ClosedChannelException, PathIOException, IOException +data != null else raise NullPointerException +offset >= 0 else raise IndexOutOfBoundsException +len >= 0 else raise IndexOutOfBoundsException +offset < data.length else raise IndexOutOfBoundsException +offset + len < data.length else raise IndexOutOfBoundsException +``` + +After the operation has returned, the buffer may be re-used. The outcome +of updates to the buffer while the `write()` operation is in progress is undefined. + +#### Postconditions + +```python +Stream'.buffer = Stream.buffer + data[offset...(offset + len)] +``` + +### `write(byte[] data)` + +This is defined as the equivalent of: + +```python +write(data, 0, data.length) +``` + +### `flush()` + +Requests that the data is flushed. The specification of `ObjectStream.flush()` +declares that this SHOULD write data to the "intended destination". + +It explicitly precludes any guarantees about durability. + +For that reason, this document doesn't provide any normative +specifications of behaviour. + +#### Preconditions + +None. + +#### Postconditions + +None. + +If the implementation chooses to implement a stream-flushing operation, +the data may be saved to the file system such that it becomes visible to +others" + +```python +FS' = FS where data(FS', path) == buffer +``` + +When a stream is closed, `flush()` SHOULD downgrade to being a no-op, if it was not +one already. This is to work with applications and libraries which can invoke +it in exactly this way. + + +*Issue*: Should `flush()` forward to `hflush()`? + +No. Or at least, make it optional. + +There's a lot of application code which assumes that `flush()` is low cost +and should be invoked after writing every single line of output, after +writing small 4KB blocks or similar. + +Forwarding this to a full flush across a distributed filesystem, or worse, +a distant object store, is very inefficient. +Filesystem clients which convert a `flush()` to an `hflush()` will eventually +have to roll back that feature: +[HADOOP-16548](https://issues.apache.org/jira/browse/HADOOP-16548). + +### `close()` + +The `close()` operation saves all data to the filesystem and +releases any resources used for writing data. + +The `close()` call is expected to block +until the write has completed (as with `Syncable.hflush()`), possibly +until it has been written to durable storage. + +After `close()` completes, the data in a file MUST be visible and consistent +with the data most recently written. The metadata of the file MUST be consistent +with the data and the write history itself (i.e. any modification time fields +updated). + +After `close()` is invoked, all subsequent `write()` calls on the stream +MUST fail with an `IOException`. + +Any locking/leaseholding mechanism MUST release its lock/lease. + +```python +Stream'.open = false +FS' = FS where data(FS', path) == buffer +``` + +The `close()` call MAY fail during its operation. + +1. Callers of the API MUST expect for some calls to `close()` to fail and SHOULD code appropriately. +Catching and swallowing exceptions, while common, is not always the ideal solution. +1. Even after a failure, `close()` MUST place the stream into a closed state. +Follow-on calls to `close()` are ignored, and calls to other methods +rejected. That is: caller's cannot be expected to call `close()` repeatedly +until it succeeds. 
+1. The duration of the `close()` operation is undefined. Operations which rely +on acknowledgements from remote systems to meet the persistence guarantees +implicitly have to await these acknowledgements. Some Object Store output streams +upload the entire data file in the `close()` operation. This can take a large amount +of time. The fact that many user applications assume that `close()` is both fast +and does not fail means that this behavior is dangerous. + +Recommendations for safe use by callers + +* Do plan for exceptions being raised, either in catching and logging or +by throwing the exception further up. Catching and silently swallowing exceptions +may hide serious problems. +* Heartbeat operations SHOULD take place on a separate thread, so that a long +delay in `close()` does not block the thread so long that the heartbeat times +out. + +Implementors: + +* Have a look at [HADOOP-16785](https://issues.apache.org/jira/browse/HADOOP-16785) +to see examples of complications in close. +* Incrementally writing blocks before a close operation results in a behavior which +matches client expectations better: write failures to surface earlier and close +to be more housekeeping than the actual upload. +* If block uploads are executed in separate threads, the output stream `close()` +call MUST block until all the asynchronous uploads have completed; any error raised +MUST be reported. +If multiple errors were raised, the stream can choose which to propagate. +What is important is: when `close()` returns without an error, applications expect +the data to have been successfully written. + +### HDFS and `OutputStream.close()` + +HDFS does not immediately `sync()` the output of a written file to disk on +`OutputStream.close()` unless configured with `dfs.datanode.synconclose` +is true. This has caused [problems in some applications](https://issues.apache.org/jira/browse/ACCUMULO-1364). + +Applications which absolutely require the guarantee that a file has been persisted +MUST call `Syncable.hsync()` *before* the file is closed. + + +## `org.apache.hadoop.fs.Syncable` + +```java +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface Syncable { + + + /** Flush out the data in client's user buffer. After the return of + * this call, new readers will see the data. + * @throws IOException if any error occurs + */ + void hflush() throws IOException; + + /** Similar to posix fsync, flush out the data in client's user buffer + * all the way to the disk device (but the disk may have it in its cache). + * @throws IOException if error occurs + */ + void hsync() throws IOException; +} +``` + +The purpose of `Syncable` interface is to provide guarantees that data is written +to a filesystem for both visibility and durability. + +*SYNC-1*: An `OutputStream` which implements `Syncable` and does not raise +`UnsupportedOperationException` on invocations is +making an explicit declaration that it can meet those guarantees. + +*SYNC-2*: If a stream, declares the interface as implemented, but does not +provide durability, the interface's methods MUST raise +`UnsupportedOperationException`. + +The `Syncable` interface has been implemented by other classes than +subclasses of `OutputStream`, such as `org.apache.hadoop.io.SequenceFile.Writer`. + +*SYNC-3* The fact that a class implements `Syncable` does not guarantee +that `extends OutputStream` holds. 
+ +That is, for any class `C`: `(C instanceof Syncable)` does not imply +`(C instanceof OutputStream)` + +This specification only covers the required behavior of `OutputStream` subclasses +which implement `Syncable`. + + +*SYNC-4:* The return value of `FileSystem.create(Path)` is an instance +of `FSDataOutputStream`. + +*SYNC-5:* `FSDataOutputStream implements Syncable` + + +SYNC-5 and SYNC-1 imply that all output streams which can be created +with `FileSystem.create(Path)` must support the semantics of `Syncable`. +This is demonstrably not true: `FSDataOutputStream` simply downgrades +to a `flush()` if its wrapped stream is not `Syncable`. +Therefore the declarations SYNC-1 and SYNC-2 do not hold: you cannot trust `Syncable`. + +Put differently: *callers MUST NOT rely on the presence of the interface +as evidence that the semantics of `Syncable` are supported*. Instead +they MUST be dynamically probed for using the `StreamCapabilities` +interface, where available. + + +### `Syncable.hflush()` + +Flush out the data in client's user buffer. After the return of +this call, new readers will see the data. The `hflush()` operation +does not contain any guarantees as to the durability of the data. only +its visibility. + +Thus implementations may cache the written data in memory +—visible to all, but not yet persisted. + +#### Preconditions + +```python +hasCapability(Stream, "hflush") +Stream.open else raise IOException +``` + + +#### Postconditions + +```python +FS' = FS where data(path) == cache +``` + + +After the call returns, the data MUST be visible to all new callers +of `FileSystem.open(path)` and `FileSystem.openFile(path).build()`. + +There is no requirement or guarantee that clients with an existing +`DataInputStream` created by a call to `(FS, path)` will see the updated +data, nor is there a guarantee that they *will not* in a current or subsequent +read. + +Implementation note: as a correct `hsync()` implementation MUST also +offer all the semantics of an `hflush()` call, implementations of `hflush()` +may just invoke `hsync()`: + +```java +public void hflush() throws IOException { + hsync(); +} +``` + +#### `hflush()` Performance + +The `hflush()` call MUST block until the store has acknowledge that the +data has been received and is now visible to others. This can be slow, +as it will include the time to upload any outstanding data from the +client, and for the filesystem itself to process it. + +Often Filesystems only offer the `Syncable.hsync()` guarantees: persistence as +well as visibility. This means the time to return can be even greater. + +Application code MUST NOT call `hflush()` or `hsync()` at the end of every line +or, unless they are writing a WAL, at the end of every record. Use with care. + + +### `Syncable.hsync()` + +Similar to POSIX `fsync()`, this call saves the data in client's user buffer +all the way to the disk device (but the disk may have it in its cache). + +That is: it is a requirement for the underlying FS To save all the data to +the disk hardware itself, where it is expected to be durable. + +#### Preconditions + +```python +hasCapability(Stream, "hsync") +Stream.open else raise IOException +``` + +#### Postconditions + +```python +FS' = FS where data(path) == buffer +``` + +_Implementations are required to block until that write has been +acknowledged by the store._ + +This is so the caller can be confident that once the call has +returned successfully, the data has been written. 
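
Since, per SYNC-1 and SYNC-2, the presence of the interface alone cannot be
trusted, a caller which needs durability should probe the stream first. A
minimal, non-normative sketch follows; what to do when the capability is
absent is an application decision.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.StreamCapabilities;

public final class DurableSync {

  /** Sync the stream if it really supports hsync(); otherwise fail fast. */
  public static void hsyncOrFail(FSDataOutputStream out) throws IOException {
    if (!out.hasCapability(StreamCapabilities.HSYNC)) {
      throw new UnsupportedOperationException(
          "stream does not offer a durable hsync(): " + out);
    }
    out.hsync();   // once this returns, the data has been persisted
  }
}
```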
+ + + +## Interface `StreamCapabilities` + +```java +@InterfaceAudience.Public +@InterfaceStability.Evolving +``` + +The `org.apache.hadoop.fs.StreamCapabilities` interface exists to allow callers to dynamically +determine the behavior of a stream. + +```java + public boolean hasCapability(String capability) { + switch (capability.toLowerCase(Locale.ENGLISH)) { + case StreamCapabilities.HSYNC: + case StreamCapabilities.HFLUSH: + return supportFlush; + default: + return false; + } + } +``` + +Once a stream has been closed, a `hasCapability()` call MUST do one of + +* return the capabilities of the open stream. +* return false. + +That is: it MUST NOT raise an exception about the file being closed; + +See [pathcapabilities](pathcapabilities.html) for specifics on the `PathCapabilities` API; +the requirements are similar: a stream MUST NOT return true for a capability +for which it lacks support, be it because + +* The capability is unknown. +* The capability is known and known to be unsupported. + +Standard stream capabilities are defined in `StreamCapabilities`; +consult the javadocs for the complete set of options. + +| Name | Probes for support of | +|-------|---------| +| `dropbehind` | `CanSetDropBehind.setDropBehind()` | +| `hsync` | `Syncable.hsync()` | +| `hflush` | `Syncable.hflush()`. Deprecated: probe for `HSYNC` only. | +| `in:readahead` | `CanSetReadahead.setReadahead()` | +| `in:unbuffer"` | `CanUnbuffer.unbuffer()` | +| `in:readbytebuffer` | `ByteBufferReadable#read(ByteBuffer)` | +| `in:preadbytebuffer` | `ByteBufferPositionedReadable#read(long, ByteBuffer)` | + +Stream implementations MAY add their own custom options. +These MUST be prefixed with `fs.SCHEMA.`, where `SCHEMA` is the schema of the filesystem. + +## interface `CanSetDropBehind` + +```java +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface CanSetDropBehind { + /** + * Configure whether the stream should drop the cache. + * + * @param dropCache Whether to drop the cache. null means to use the + * default value. + * @throws IOException If there was an error changing the dropBehind + * setting. + * UnsupportedOperationException If this stream doesn't support + * setting the drop-behind. + */ + void setDropBehind(Boolean dropCache) + throws IOException, UnsupportedOperationException; +} +``` + +This interface allows callers to change policies used inside HDFS. + +Implementations MUST return `true` for the call + +```java +StreamCapabilities.hasCapability("dropbehind"); +``` + + +## Durability, Concurrency, Consistency and Visibility of stream output. + +These are the aspects of the system behaviour which are not directly +covered in this (very simplistic) filesystem model, but which are visible +in production. + + +### Durability + +1. `OutputStream.write()` MAY persist the data, synchronously or asynchronously +1. `OutputStream.flush()` flushes data to the destination. There +are no strict persistence requirements. +1. `Syncable.hflush()` synchronously sends all outstaning data to the destination +filesystem. After returning to the caller, the data MUST be visible to other readers, +it MAY be durable. That is: it does not have to be persisted, merely guaranteed +to be consistently visible to all clients attempting to open a new stream reading +data at the path. +1. `Syncable.hsync()` MUST transmit the data as per `hflush` and persist + that data to the underlying durable storage. +1. 
`close()` The first call to `close()` MUST flush out all remaining data in +the buffers, and persist it, as a call to `hsync()`. + + +Many applications call `flush()` far too often -such as at the end of every line written. +If this triggered an update of the data in persistent storage and any accompanying +metadata, distributed stores would overload fast. +Thus: `flush()` is often treated at most as a cue to flush data to the network +buffers -but not commit to writing any data. + +It is only the `Syncable` interface which offers guarantees. + +The two `Syncable` operations `hsync()` and `hflush()` differ purely by the extra guarantee of `hsync()`: the data must be persisted. +If `hsync()` is implemented, then `hflush()` can be implemented simply +by invoking `hsync()` + +```java +public void hflush() throws IOException { + hsync(); +} +``` + +This is perfectly acceptable as an implementation: the semantics of `hflush()` +are satisifed. +What is not acceptable is downgrading `hsync()` to `hflush()`, as the durability guarantee is no longer met. + + +### Concurrency + +1. The outcome of more than one process writing to the same file is undefined. + +1. An input stream opened to read a file *before the file was opened for writing* +MAY fetch data updated by writes to an OutputStream. +Because of buffering and caching, this is not a requirement +—and if an input stream does pick up updated data, the point at +which the updated data is read is undefined. This surfaces in object stores +where a `seek()` call which closes and re-opens the connection may pick up +updated data, while forward stream reads do not. Similarly, in block-oriented +filesystems, the data may be cached a block at a time —and changes only picked +up when a different block is read. + +1. A filesystem MAY allow the destination path to be manipulated while a stream +is writing to it —for example, `rename()` of the path or a parent; `delete()` of +a path or parent. In such a case, the outcome of future write operations on +the output stream is undefined. Some filesystems MAY implement locking to +prevent conflict. However, this tends to be rare on distributed filesystems, +for reasons well known in the literature. + +1. The Java API specification of `java.io.OutputStream` does not require +an instance of the class to be thread safe. +However, `org.apache.hadoop.hdfs.DFSOutputStream` +has a stronger thread safety model (possibly unintentionally). This fact is +relied upon in Apache HBase, as discovered in HADOOP-11708. Implementations +SHOULD be thread safe. *Note*: even the `DFSOutputStream` synchronization +model permits the output stream to have `close()` invoked while awaiting an +acknowledgement from datanode or namenode writes in an `hsync()` operation. + +### Consistency and Visibility + +There is no requirement for the data to be immediately visible to other applications +—not until a specific call to flush buffers or persist it to the underlying storage +medium are made. + +If an output stream is created with `FileSystem.create(path, overwrite==true)` +and there is an existing file at the path, that is `exists(FS, path)` holds, +then, the existing data is immediately unavailable; the data at the end of the +path MUST consist of an empty byte sequence `[]`, with consistent metadata. 
+ + +```python +exists(FS, path) +(Stream', FS') = create(FS, path) +exists(FS', path) +getFileStatus(FS', path).getLen() = 0 +``` + +The metadata of a file (`length(FS, path)` in particular) SHOULD be consistent +with the contents of the file after `flush()` and `sync()`. + +```python +(Stream', FS') = create(FS, path) +(Stream'', FS'') = write(Stream', data) +(Stream''', FS''') hsync(Stream'') +exists(FS''', path) +getFileStatus(FS''', path).getLen() = len(data) +``` + +*HDFS does not do this except when the write crosses a block boundary*; to do +otherwise would overload the Namenode. Other stores MAY copy this behavior. + +As a result, while a file is being written +`length(Filesystem, Path)` MAY be less than the length of `data(Filesystem, Path)`. + +The metadata MUST be consistent with the contents of a file after the `close()` +operation. + +After the contents of an output stream have been persisted (`hflush()/hsync()`) +all new `open(FS, Path)` operations MUST return the updated data. + +After `close()` has been invoked on an output stream, +a call to `getFileStatus(path)` MUST return the final metadata of the written file, +including length and modification time. +The metadata of the file returned in any of the FileSystem `list` operations +MUST be consistent with this metadata. + +The value of `getFileStatus(path).getModificationTime()` is not defined +while a stream is being written to. +The timestamp MAY be updated while a file is being written, +especially after a `Syncable.hsync()` call. +The timestamps MUST be updated after the file is closed +to that of a clock value observed by the server during the `close()` call. +It is *likely* to be in the time and time zone of the filesystem, rather +than that of the client. + +Formally, if a `close()` operation triggers an interaction with a server +which starts at server-side time `t1` and completes at time `t2` with a successfully +written file, then the last modification time SHOULD be a time `t` where +`t1 <= t <= t2` + +## Issues with the Hadoop Output Stream model. + +There are some known issues with the output stream model as offered by Hadoop, +specifically about the guarantees about when data is written and persisted +—and when the metadata is synchronized. +These are where implementation aspects of HDFS and the "Local" filesystem +do not follow the simple model of the filesystem used in this specification. + +### HDFS + +#### HDFS: `hsync()` only syncs the latest block + +The reference implementation, `DFSOutputStream` will block until an +acknowledgement is received from the datanodes: that is, all hosts in the +replica write chain have successfully written the file. + +That means that the expectation callers may have is that the return of the +method call contains visibility and durability guarantees which other +implementations must maintain. + +Note, however, that the reference `DFSOutputStream.hsync()` call only actually +persists *the current block*. If there have been a series of writes since the +last sync, such that a block boundary has been crossed. The `hsync()` call +claims only to write the most recent. + +From the javadocs of `DFSOutputStream.hsync(EnumSet syncFlags)` + +> Note that only the current block is flushed to the disk device. +> To guarantee durable sync across block boundaries the stream should +> be created with {@link CreateFlag#SYNC_BLOCK}. 
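
A hedged sketch of requesting sync-on-block-boundary semantics at create time,
as the javadoc above suggests. The buffer size and path are illustrative, and
the `create()` overload shown is only one way to pass `CreateFlag` values.

```java
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class SyncBlockWriter {

  /**
   * Open a write-ahead-log style file whose completed blocks are synced
   * as they are written, so hsync() durability spans block boundaries.
   */
  public static FSDataOutputStream openWal(FileSystem fs, Path wal)
      throws IOException {
    return fs.create(
        wal,
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK),
        4096,                                   // buffer size: illustrative
        fs.getDefaultReplication(wal),
        fs.getDefaultBlockSize(wal),
        null);                                  // no Progressable
  }
}
```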
+ + +This is an important HDFS implementation detail which must not be ignored by +anyone relying on HDFS to provide a Write-Ahead-Log or other database structure +where the requirement of the application is that +"all preceeding bytes MUST have been persisted before the commit flag in the WAL +is flushed" + +See [Stonebraker81], Michael Stonebraker, _Operating System Support for Database Management_, +1981, for a discussion on this topic. + +If you do need `hsync()` to have synced every block in a very large write, call +it regularly. + +#### HDFS: delayed visibility of metadata updates. + +That HDFS file metadata often lags the content of a file being written +to is not something everyone expects, nor convenient for any program trying +to pick up updated data in a file being written. Most visible is the length +of a file returned in the various `list` commands and `getFileStatus` —this +is often out of date. + +As HDFS only supports file growth in its output operations, this means +that the size of the file as listed in the metadata may be less than or equal +to the number of available bytes —but never larger. This is a guarantee which +is also held + +One algorithm to determine whether a file in HDFS is updated is: + +1. Remember the last read position `pos` in the file, using `0` if this is the initial +read. +1. Use `getFileStatus(FS, Path)` to query the updated length of the file as +recorded in the metadata. +1. If `Status.length > pos`, the file has grown. +1. If the number has not changed, then + 1. Reopen the file. + 1. `seek(pos)` to that location + 1. If `read() != -1`, there is new data. + +This algorithm works for filesystems which are consistent with metadata and +data, as well as HDFS. What is important to know is that, for an open file +`getFileStatus(FS, path).getLen() == 0` does not imply that `data(FS, path)` is +empty. + +When an output stream in HDFS is closed; the newly written data is not immediately +written to disk unless HDFS is deployed with `dfs.datanode.synconclose` set to +true. Otherwise it is cached and written to disk later. + +### Local Filesystem, `file:` + +`LocalFileSystem`, `file:`, (or any other `FileSystem` implementation based on +`ChecksumFileSystem`) has a different issue. If an output stream +is obtained from `create()` and `FileSystem.setWriteChecksum(false)` has +*not* been called on the filesystem, then the stream only flushes as much +local data as can be written to full checksummed blocks of data. + +That is, the hsync/hflush operations are not guaranteed to write all the pending +data until the file is finally closed. + +For this reason, the local fileystem accessed via `file://` URLs +does not support `Syncable` unless `setWriteChecksum(false)` was +called on that FileSystem instance so as to disable checksum creation. +After which, obviously, checksums are not generated for any file. +Is +### Checksummed output streams + +Because `org.apache.hadoop.fs.FSOutputSummer` and +`org.apache.hadoop.fs.ChecksumFileSystem.ChecksumFSOutputSummer` +implement the underlying checksummed output stream used by HDFS and +other filesystems, it provides some of the core semantics of the output +stream behavior. + +1. The `close()` call is unsynchronized, re-entrant and may attempt +to close the stream more than once. +1. It is possible to call `write(int)` on a closed stream (but not +`write(byte[], int, int)`). +1. It is possible to call `flush()` on a closed stream. + +Behaviors 1 and 2 really have to be considered bugs to fix, albeit with care. 
+ +Behavior 3 has to be considered a defacto standard, for other implementations +to copy. + +### Object Stores + +Object store streams MAY buffer the entire stream's output +until the final `close()` operation triggers a single `PUT` of the data +and materialization of the final output. + +This significantly changes their behaviour compared to that of +POSIX filesystems and that specified in this document. + +#### Visibility of newly created objects + +There is no guarantee that any file will be visible at the path of an output +stream after the output stream is created . + +That is: while `create(FS, path, boolean)` returns a new stream + +```python +Stream' = (path, true, []) +``` + +The other postcondition of the operation, `data(FS', path) == []` MAY NOT +hold, in which case: + +1. `exists(FS, p)` MAY return false. +1. If a file was created with `overwrite = True`, the existing data MAY still +be visible: `data(FS', path) = data(FS, path)`. + +1. The check for existing data in a `create()` call with `overwrite=False`, may +take place in the `create()` call itself, in the `close()` call prior to/during +the write, or at some point in between. In the special case that the +object store supports an atomic `PUT` operation, the check for existence of +existing data and the subsequent creation of data at the path contains a race +condition: other clients may create data at the path between the existence check +and the subsequent write. + +1. Calls to `create(FS, Path, overwrite=false)` MAY succeed, returning a new +`OutputStream`, even while another stream is open and writing to the destination +path. + +This allows for the following sequence of operations, which would +raise an exception in the second `open()` call if invoked against HDFS: + +```python +Stream1 = open(FS, path, false) +sleep(200) +Stream2 = open(FS, path, false) +Stream.write('a') +Stream1.close() +Stream2.close() +``` + +For anyone wondering why the clients don't create a 0-byte file in the `create()` call, +it would cause problems after `close()` —the marker file could get +returned in `open()` calls instead of the final data. + +#### Visibility of the output of a stream after `close()` + +One guarantee which Object Stores SHOULD make is the same as those of POSIX +filesystems: After a stream `close()` call returns, the data MUST be persisted +durably and visible to all callers. Unfortunately, even that guarantee is +not always met: + +1. Existing data on a path MAY be visible for an indeterminate period of time. + +1. If the store has any form of create inconsistency or buffering of negative +existence probes, then even after the stream's `close()` operation has returned, +`getFileStatus(FS, path)` and `open(FS, path)` may fail with a `FileNotFoundException`. + +In their favour, the atomicity of the store's PUT operations do offer their +own guarantee: a newly created object is either absent or all of its data +is present: the act of instantiating the object, while potentially exhibiting +create inconsistency, is atomic. Applications may be able to use that fact +to their advantage. + +The [Abortable](abortable.html) interface exposes this ability to abort an output +stream before its data is made visible, so can be used for checkpointing and similar +operations. + +## Implementors notes. 
+ +### Always implement `Syncable` -even if just to throw `UnsupportedOperationException` + +Because `FSDataOutputStream` silently downgrades `Syncable.hflush()` +and `Syncable.hsync()` to `wrappedStream.flush()`, callers of the +API MAY be misled into believing that their data has been flushed/synced +after syncing to a stream which does not support the APIs. + +Implementations SHOULD implement the API but +throw `UnsupportedOperationException`. + +### `StreamCapabilities` + +Implementors of filesystem clients SHOULD implement the `StreamCapabilities` +interface and its `hasCapabilities()` method to to declare whether or not +an output streams offer the visibility and durability guarantees of `Syncable`. + +Implementors of `StreamCapabilities.hasCapabilities()` MUST NOT declare that +they support the `hflush` and `hsync` capabilities on streams where this is not true. + +Sometimes streams pass their data to store, but the far end may not +sync it all the way to disk. That is not something the client can determine. +Here: if the client code is making the hflush/hsync passes these requests +on to the distributed FS, it SHOULD declare that it supports them. + +### Metadata updates + +Implementors MAY NOT update a file's metadata (length, date, ...) after +every `hsync()` call. HDFS doesn't, except when the written data crosses +a block boundary. + + + +### Does `close()` synchronize and persist data? + +By default, HDFS does not immediately data to disk when a stream is closed; it will +be asynchronously saved to disk. + +This does not mean that users do not expect it. + +The behavior as implemented is similar to the write-back aspect's of NFS's +[caching](https://docstore.mik.ua/orelly/networking_2ndEd/nfs/ch07_04.htm). +`DFSClient.close()` is performing an `hflush()` to the client to upload +all data to the datanodes. + +1. `close()` SHALL return once the guarantees of `hflush()` are met: the data is + visible to others. +1. For durability guarantees, `hsync()` MUST be called first. \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.2/CHANGELOG.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.2/CHANGELOG.md new file mode 100644 index 0000000000000..4d6a0f1102981 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.2/CHANGELOG.md @@ -0,0 +1,576 @@ + + +# Apache Hadoop Changelog + +## Release 3.2.2 - 2021-01-03 + + + +### NEW FEATURES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-15691](https://issues.apache.org/jira/browse/HADOOP-15691) | Add PathCapabilities to FS and FC to complement StreamCapabilities | Major | . | Steve Loughran | Steve Loughran | +| [YARN-9760](https://issues.apache.org/jira/browse/YARN-9760) | Support configuring application priorities on a workflow level | Major | . | Jonathan Hung | Varun Saxena | +| [HDFS-14905](https://issues.apache.org/jira/browse/HDFS-14905) | Backport HDFS persistent memory read cache support to branch-3.2 | Major | caching, datanode | Feilong He | Feilong He | +| [HDFS-12943](https://issues.apache.org/jira/browse/HDFS-12943) | Consistent Reads from Standby Node | Major | hdfs | Konstantin Shvachko | Konstantin Shvachko | +| [HADOOP-16790](https://issues.apache.org/jira/browse/HADOOP-16790) | Add Write Convenience Methods | Minor | . 
| David Mollitor | David Mollitor | +| [HADOOP-17210](https://issues.apache.org/jira/browse/HADOOP-17210) | backport HADOOP-15691 PathCapabilities API to branch-3.2 | Major | fs, fs/s3 | Steve Loughran | Steve Loughran | + + +### IMPROVEMENTS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [YARN-8750](https://issues.apache.org/jira/browse/YARN-8750) | Refactor TestQueueMetrics | Minor | resourcemanager | Szilard Nemeth | Szilard Nemeth | +| [HADOOP-15849](https://issues.apache.org/jira/browse/HADOOP-15849) | Upgrade netty version to 3.10.6 | Major | . | Xiao Chen | Xiao Chen | +| [HDFS-12946](https://issues.apache.org/jira/browse/HDFS-12946) | Add a tool to check rack configuration against EC policies | Major | erasure-coding | Xiao Chen | Kitti Nanasi | +| [HDFS-14113](https://issues.apache.org/jira/browse/HDFS-14113) | EC : Add Configuration to restrict UserDefined Policies | Major | erasure-coding | Ayush Saxena | Ayush Saxena | +| [HDFS-14006](https://issues.apache.org/jira/browse/HDFS-14006) | Refactor name node to allow different token verification implementations | Major | . | CR Hota | CR Hota | +| [HADOOP-15909](https://issues.apache.org/jira/browse/HADOOP-15909) | KeyProvider class should implement Closeable | Major | kms | Kuhu Shukla | Kuhu Shukla | +| [HDFS-14061](https://issues.apache.org/jira/browse/HDFS-14061) | Check if the cluster topology supports the EC policy before setting, enabling or adding it | Major | erasure-coding, hdfs | Kitti Nanasi | Kitti Nanasi | +| [HDFS-14187](https://issues.apache.org/jira/browse/HDFS-14187) | Make warning message more clear when there are not enough data nodes for EC write | Major | erasure-coding | Kitti Nanasi | Kitti Nanasi | +| [HDFS-14125](https://issues.apache.org/jira/browse/HDFS-14125) | Use parameterized log format in ECTopologyVerifier | Trivial | erasure-coding | Kitti Nanasi | Kitti Nanasi | +| [HDFS-14188](https://issues.apache.org/jira/browse/HDFS-14188) | Make hdfs ec -verifyClusterSetup command accept an erasure coding policy as a parameter | Major | erasure-coding | Kitti Nanasi | Kitti Nanasi | +| [HADOOP-16126](https://issues.apache.org/jira/browse/HADOOP-16126) | ipc.Client.stop() may sleep too long to wait for all connections | Major | ipc | Tsz-wo Sze | Tsz-wo Sze | +| [HADOOP-15014](https://issues.apache.org/jira/browse/HADOOP-15014) | KMS should log the IP address of the clients | Major | kms | Zsombor Gegesy | Zsombor Gegesy | +| [HDFS-14460](https://issues.apache.org/jira/browse/HDFS-14460) | DFSUtil#getNamenodeWebAddr should return HTTPS address based on policy configured | Major | . 
| CR Hota | CR Hota | +| [HDFS-14624](https://issues.apache.org/jira/browse/HDFS-14624) | When decommissioning a node, log remaining blocks to replicate periodically | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-13693](https://issues.apache.org/jira/browse/HDFS-13693) | Remove unnecessary search in INodeDirectory.addChild during image loading | Major | namenode | zhouyingchao | Lisheng Sun | +| [HDFS-14313](https://issues.apache.org/jira/browse/HDFS-14313) | Get hdfs used space from FsDatasetImpl#volumeMap#ReplicaInfo in memory instead of df/du | Major | datanode, performance | Lisheng Sun | Lisheng Sun | +| [HDFS-14678](https://issues.apache.org/jira/browse/HDFS-14678) | Allow triggerBlockReport to a specific namenode | Major | datanode | Leon Gao | Leon Gao | +| [HDFS-14523](https://issues.apache.org/jira/browse/HDFS-14523) | Remove excess read lock for NetworkToplogy | Major | . | Wu Weiwei | Wu Weiwei | +| [HDFS-14497](https://issues.apache.org/jira/browse/HDFS-14497) | Write lock held by metasave impact following RPC processing | Major | namenode | Xiaoqiao He | Xiaoqiao He | +| [HADOOP-16531](https://issues.apache.org/jira/browse/HADOOP-16531) | Log more detail for slow RPC | Major | . | Chen Zhang | Chen Zhang | +| [YARN-9764](https://issues.apache.org/jira/browse/YARN-9764) | Print application submission context label in application summary | Major | . | Jonathan Hung | Manoj Kumar | +| [YARN-9824](https://issues.apache.org/jira/browse/YARN-9824) | Fall back to configured queue ordering policy class name | Major | . | Jonathan Hung | Jonathan Hung | +| [HADOOP-16069](https://issues.apache.org/jira/browse/HADOOP-16069) | Support configure ZK\_DTSM\_ZK\_KERBEROS\_PRINCIPAL in ZKDelegationTokenSecretManager using principal with Schema /\_HOST | Minor | common | luhuachao | luhuachao | +| [YARN-9762](https://issues.apache.org/jira/browse/YARN-9762) | Add submission context label to audit logs | Major | . | Jonathan Hung | Manoj Kumar | +| [HDFS-14850](https://issues.apache.org/jira/browse/HDFS-14850) | Optimize FileSystemAccessService#getFileSystemConfiguration | Major | httpfs, performance | Lisheng Sun | Lisheng Sun | +| [HDFS-14192](https://issues.apache.org/jira/browse/HDFS-14192) | Track missing DFS operations in Statistics and StorageStatistics | Major | . | Ayush Saxena | Ayush Saxena | +| [YARN-9356](https://issues.apache.org/jira/browse/YARN-9356) | Add more tests to ratio method in TestResourceCalculator | Major | . | Szilard Nemeth | Zoltan Siegl | +| [HADOOP-16643](https://issues.apache.org/jira/browse/HADOOP-16643) | Update netty4 to the latest 4.1.42 | Major | . | Wei-Chiu Chuang | Lisheng Sun | +| [HADOOP-16640](https://issues.apache.org/jira/browse/HADOOP-16640) | WASB: Override getCanonicalServiceName() to return full url of WASB filesystem | Major | fs/azure | Da Zhou | Da Zhou | +| [HDFS-14915](https://issues.apache.org/jira/browse/HDFS-14915) | Move Superuser Check Before Taking Lock For Encryption API | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-14921](https://issues.apache.org/jira/browse/HDFS-14921) | Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-14923](https://issues.apache.org/jira/browse/HDFS-14923) | Remove dead code from HealthMonitor | Minor | . 
| Hui Fei | Hui Fei | +| [YARN-9914](https://issues.apache.org/jira/browse/YARN-9914) | Use separate configs for free disk space checking for full and not-full disks | Minor | yarn | Jim Brennan | Jim Brennan | +| [MAPREDUCE-7208](https://issues.apache.org/jira/browse/MAPREDUCE-7208) | Tuning TaskRuntimeEstimator | Minor | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-14942](https://issues.apache.org/jira/browse/HDFS-14942) | Change Log Level to debug in JournalNodeSyncer#syncWithJournalAtIndex | Minor | . | Lisheng Sun | Lisheng Sun | +| [HDFS-14979](https://issues.apache.org/jira/browse/HDFS-14979) | [Observer Node] Balancer should submit getBlocks to Observer Node when possible | Major | balancer & mover, hdfs | Erik Krogen | Erik Krogen | +| [HADOOP-16705](https://issues.apache.org/jira/browse/HADOOP-16705) | MBeanInfoBuilder puts unnecessary memory pressure on the system with a debug log | Major | metrics | Lukas Majercak | Lukas Majercak | +| [HADOOP-16712](https://issues.apache.org/jira/browse/HADOOP-16712) | Config ha.failover-controller.active-standby-elector.zk.op.retries is not in core-default.xml | Trivial | . | Wei-Chiu Chuang | Xieming Li | +| [HDFS-14952](https://issues.apache.org/jira/browse/HDFS-14952) | Skip safemode if blockTotal is 0 in new NN | Trivial | namenode | Rajesh Balamohan | Xiaoqiao He | +| [YARN-8842](https://issues.apache.org/jira/browse/YARN-8842) | Expose metrics for custom resource types in QueueMetrics | Major | . | Szilard Nemeth | Szilard Nemeth | +| [YARN-9966](https://issues.apache.org/jira/browse/YARN-9966) | Code duplication in UserGroupMappingPlacementRule | Major | . | Szilard Nemeth | Kevin Su | +| [YARN-9937](https://issues.apache.org/jira/browse/YARN-9937) | Add missing queue configs in RMWebService#CapacitySchedulerQueueInfo | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [HADOOP-16718](https://issues.apache.org/jira/browse/HADOOP-16718) | Allow disabling Server Name Indication (SNI) for Jetty | Major | . | Siyao Meng | Aravindan Vijayan | +| [HADOOP-16729](https://issues.apache.org/jira/browse/HADOOP-16729) | Extract version numbers to head of pom.xml | Minor | build | Tamas Penzes | Tamas Penzes | +| [HADOOP-16735](https://issues.apache.org/jira/browse/HADOOP-16735) | Make it clearer in config default that EnvironmentVariableCredentialsProvider supports AWS\_SESSION\_TOKEN | Minor | documentation, fs/s3 | Mingliang Liu | Mingliang Liu | +| [YARN-10012](https://issues.apache.org/jira/browse/YARN-10012) | Guaranteed and max capacity queue metrics for custom resources | Major | . | Jonathan Hung | Manikandan R | +| [HDFS-15050](https://issues.apache.org/jira/browse/HDFS-15050) | Optimize log information when DFSInputStream meet CannotObtainBlockLengthException | Major | dfsclient | Xiaoqiao He | Xiaoqiao He | +| [YARN-10033](https://issues.apache.org/jira/browse/YARN-10033) | TestProportionalCapacityPreemptionPolicy not initializing vcores for effective max resources | Major | capacity scheduler, test | Eric Payne | Eric Payne | +| [YARN-10039](https://issues.apache.org/jira/browse/YARN-10039) | Allow disabling app submission from REST endpoints | Major | . | Jonathan Hung | Jonathan Hung | +| [YARN-9894](https://issues.apache.org/jira/browse/YARN-9894) | CapacitySchedulerPerf test for measuring hundreds of apps in a large number of queues. 
| Major | capacity scheduler, test | Eric Payne | Eric Payne | +| [HADOOP-16771](https://issues.apache.org/jira/browse/HADOOP-16771) | Update checkstyle to 8.26 and maven-checkstyle-plugin to 3.1.0 | Major | build | Andras Bokor | Andras Bokor | +| [YARN-10009](https://issues.apache.org/jira/browse/YARN-10009) | In Capacity Scheduler, DRC can treat minimum user limit percent as a max when custom resource is defined | Critical | capacity scheduler | Eric Payne | Eric Payne | +| [HDFS-12999](https://issues.apache.org/jira/browse/HDFS-12999) | When reach the end of the block group, it may not need to flush all the data packets(flushAllInternals) twice. | Major | erasure-coding, hdfs-client | lufei | lufei | +| [HDFS-15074](https://issues.apache.org/jira/browse/HDFS-15074) | DataNode.DataTransfer thread should catch all the expception and log it. | Major | datanode | Surendra Singh Lilhore | Hemanth Boyina | +| [HDFS-14740](https://issues.apache.org/jira/browse/HDFS-14740) | Recover data blocks from persistent memory read cache during datanode restarts | Major | caching, datanode | Feilong He | Feilong He | +| [HADOOP-16775](https://issues.apache.org/jira/browse/HADOOP-16775) | DistCp reuses the same temp file within the task attempt for different files. | Major | tools/distcp | Amir Shenavandeh | Amir Shenavandeh | +| [HDFS-15097](https://issues.apache.org/jira/browse/HDFS-15097) | Purge log in KMS and HttpFS | Minor | httpfs, kms | Doris Gu | Doris Gu | +| [HADOOP-16753](https://issues.apache.org/jira/browse/HADOOP-16753) | Refactor HAAdmin | Major | ha | Akira Ajisaka | Xieming Li | +| [HDFS-14968](https://issues.apache.org/jira/browse/HDFS-14968) | Add ability to know datanode staleness | Minor | datanode, logging, namenode | Ahmed Hussein | Ahmed Hussein | +| [YARN-7913](https://issues.apache.org/jira/browse/YARN-7913) | Improve error handling when application recovery fails with exception | Major | resourcemanager | Gergo Repas | Wilfred Spiegelenburg | +| [HDFS-15117](https://issues.apache.org/jira/browse/HDFS-15117) | EC: Add getECTopologyResultForPolicies to DistributedFileSystem | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15119](https://issues.apache.org/jira/browse/HDFS-15119) | Allow expiration of cached locations in DFSInputStream | Minor | dfsclient | Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7262](https://issues.apache.org/jira/browse/MAPREDUCE-7262) | MRApp helpers block for long intervals (500ms) | Minor | mr-am | Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7260](https://issues.apache.org/jira/browse/MAPREDUCE-7260) | Cross origin request support for Job history server web UI | Critical | jobhistoryserver | Adam Antal | Adam Antal | +| [YARN-10084](https://issues.apache.org/jira/browse/YARN-10084) | Allow inheritance of max app lifetime / default app lifetime | Major | capacity scheduler | Eric Payne | Eric Payne | +| [HDFS-12491](https://issues.apache.org/jira/browse/HDFS-12491) | Support wildcard in CLASSPATH for libhdfs | Major | libhdfs | John Zhuge | Muhammad Samir Khan | +| [YARN-10116](https://issues.apache.org/jira/browse/YARN-10116) | Expose diagnostics in RMAppManager summary | Major | . 
| Jonathan Hung | Jonathan Hung | +| [HADOOP-16739](https://issues.apache.org/jira/browse/HADOOP-16739) | Fix native build failure of hadoop-pipes on CentOS 8 | Major | tools/pipes | Masatake Iwasaki | Masatake Iwasaki | +| [HADOOP-16847](https://issues.apache.org/jira/browse/HADOOP-16847) | Test TestGroupsCaching fail if HashSet iterates in a different order | Minor | test | testfixer0 | testfixer0 | +| [HDFS-14758](https://issues.apache.org/jira/browse/HDFS-14758) | Decrease lease hard limit | Minor | . | Eric Payne | Hemanth Boyina | +| [HDFS-15086](https://issues.apache.org/jira/browse/HDFS-15086) | Block scheduled counter never get decremet if the block got deleted before replication. | Major | 3.1.1 | Surendra Singh Lilhore | Hemanth Boyina | +| [HDFS-15174](https://issues.apache.org/jira/browse/HDFS-15174) | Optimize ReplicaCachingGetSpaceUsed by reducing unnecessary io operations | Major | . | Lisheng Sun | Lisheng Sun | +| [YARN-9018](https://issues.apache.org/jira/browse/YARN-9018) | Add functionality to AuxiliaryLocalPathHandler to return all locations to read for a given path | Major | . | Kuhu Shukla | Kuhu Shukla | +| [HDFS-14861](https://issues.apache.org/jira/browse/HDFS-14861) | Reset LowRedundancyBlocks Iterator periodically | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-16899](https://issues.apache.org/jira/browse/HADOOP-16899) | Update HdfsDesign.md to reduce ambiguity | Minor | documentation | Akshay Nehe | Akshay Nehe | +| [HADOOP-16772](https://issues.apache.org/jira/browse/HADOOP-16772) | Extract version numbers to head of pom.xml (addendum) | Major | build | Tamas Penzes | Tamas Penzes | +| [HDFS-15197](https://issues.apache.org/jira/browse/HDFS-15197) | [SBN read] Change ObserverRetryOnActiveException log to debug | Minor | hdfs | Chen Liang | Chen Liang | +| [HADOOP-16935](https://issues.apache.org/jira/browse/HADOOP-16935) | Backport HADOOP-10848. Cleanup calling of sun.security.krb5.Config to branch-3.2 | Minor | . | Siyao Meng | Siyao Meng | +| [YARN-10200](https://issues.apache.org/jira/browse/YARN-10200) | Add number of containers to RMAppManager summary | Major | . | Jonathan Hung | Jonathan Hung | +| [HADOOP-16952](https://issues.apache.org/jira/browse/HADOOP-16952) | Add .diff to gitignore | Minor | . | Ayush Saxena | Ayush Saxena | +| [MAPREDUCE-7266](https://issues.apache.org/jira/browse/MAPREDUCE-7266) | historyContext doesn't need to be a class attribute inside JobHistoryServer | Minor | jobhistoryserver | Siddharth Ahuja | Siddharth Ahuja | +| [YARN-10003](https://issues.apache.org/jira/browse/YARN-10003) | YarnConfigurationStore#checkVersion throws exception that belongs to RMStateStore | Major | . | Szilard Nemeth | Benjamin Teke | +| [YARN-10212](https://issues.apache.org/jira/browse/YARN-10212) | Create separate configuration for max global AM attempts | Major | . | Jonathan Hung | Bilwa S T | +| [YARN-5277](https://issues.apache.org/jira/browse/YARN-5277) | When localizers fail due to resource timestamps being out, provide more diagnostics | Major | nodemanager | Steve Loughran | Siddharth Ahuja | +| [YARN-9995](https://issues.apache.org/jira/browse/YARN-9995) | Code cleanup in TestSchedConfCLI | Minor | . | Szilard Nemeth | Bilwa S T | +| [YARN-9354](https://issues.apache.org/jira/browse/YARN-9354) | Resources should be created with ResourceTypesTestHelper instead of TestUtils | Trivial | . 
| Szilard Nemeth | Andras Gyori | +| [YARN-10002](https://issues.apache.org/jira/browse/YARN-10002) | Code cleanup and improvements in ConfigurationStoreBaseTest | Minor | . | Szilard Nemeth | Benjamin Teke | +| [YARN-9954](https://issues.apache.org/jira/browse/YARN-9954) | Configurable max application tags and max tag length | Major | . | Jonathan Hung | Bilwa S T | +| [YARN-10001](https://issues.apache.org/jira/browse/YARN-10001) | Add explanation of unimplemented methods in InMemoryConfigurationStore | Major | . | Szilard Nemeth | Siddharth Ahuja | +| [HADOOP-17001](https://issues.apache.org/jira/browse/HADOOP-17001) | The suffix name of the unified compression class | Major | io | bianqi | bianqi | +| [YARN-9997](https://issues.apache.org/jira/browse/YARN-9997) | Code cleanup in ZKConfigurationStore | Minor | . | Szilard Nemeth | Andras Gyori | +| [YARN-9996](https://issues.apache.org/jira/browse/YARN-9996) | Code cleanup in QueueAdminConfigurationMutationACLPolicy | Major | . | Szilard Nemeth | Siddharth Ahuja | +| [YARN-9998](https://issues.apache.org/jira/browse/YARN-9998) | Code cleanup in LeveldbConfigurationStore | Minor | . | Szilard Nemeth | Benjamin Teke | +| [YARN-9999](https://issues.apache.org/jira/browse/YARN-9999) | TestFSSchedulerConfigurationStore: Extend from ConfigurationStoreBaseTest, general code cleanup | Minor | . | Szilard Nemeth | Benjamin Teke | +| [HDFS-15295](https://issues.apache.org/jira/browse/HDFS-15295) | AvailableSpaceBlockPlacementPolicy should use chooseRandomWithStorageTypeTwoTrial() for better performance. | Minor | . | Jinglun | Jinglun | +| [YARN-10189](https://issues.apache.org/jira/browse/YARN-10189) | Code cleanup in LeveldbRMStateStore | Minor | . | Benjamin Teke | Benjamin Teke | +| [HADOOP-16886](https://issues.apache.org/jira/browse/HADOOP-16886) | Add hadoop.http.idle\_timeout.ms to core-default.xml | Major | . | Wei-Chiu Chuang | Lisheng Sun | +| [YARN-10260](https://issues.apache.org/jira/browse/YARN-10260) | Allow transitioning queue from DRAINING to RUNNING state | Major | . | Jonathan Hung | Bilwa S T | +| [HADOOP-17042](https://issues.apache.org/jira/browse/HADOOP-17042) | Hadoop distcp throws "ERROR: Tools helper ///usr/lib/hadoop/libexec/tools/hadoop-distcp.sh was not found" | Minor | tools/distcp | Aki Tanaka | Aki Tanaka | +| [HADOOP-14698](https://issues.apache.org/jira/browse/HADOOP-14698) | Make copyFromLocal's -t option available for put as well | Major | . | Andras Bokor | Andras Bokor | +| [YARN-6492](https://issues.apache.org/jira/browse/YARN-6492) | Generate queue metrics for each partition | Major | capacity scheduler | Jonathan Hung | Manikandan R | +| [HADOOP-17047](https://issues.apache.org/jira/browse/HADOOP-17047) | TODO comments exist in trunk while the related issues are already fixed. | Trivial | . | Rungroj Maipradit | Rungroj Maipradit | +| [HDFS-15406](https://issues.apache.org/jira/browse/HDFS-15406) | Improve the speed of Datanode Block Scan | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HADOOP-17090](https://issues.apache.org/jira/browse/HADOOP-17090) | Increase precommit job timeout from 5 hours to 20 hours | Major | build | Akira Ajisaka | Akira Ajisaka | +| [YARN-10297](https://issues.apache.org/jira/browse/YARN-10297) | TestContinuousScheduling#testFairSchedulerContinuousSchedulingInitTime fails intermittently | Major | . 
| Jonathan Hung | Jim Brennan | +| [HADOOP-17127](https://issues.apache.org/jira/browse/HADOOP-17127) | Use RpcMetrics.TIMEUNIT to initialize rpc queueTime and processingTime | Minor | common | Jim Brennan | Jim Brennan | +| [HDFS-15404](https://issues.apache.org/jira/browse/HDFS-15404) | ShellCommandFencer should expose info about source | Major | . | Chen Liang | Chen Liang | +| [HADOOP-17147](https://issues.apache.org/jira/browse/HADOOP-17147) | Dead link in hadoop-kms/index.md.vm | Minor | documentation, kms | Akira Ajisaka | Xieming Li | +| [YARN-10343](https://issues.apache.org/jira/browse/YARN-10343) | Legacy RM UI should include labeled metrics for allocated, total, and reserved resources. | Major | . | Eric Payne | Eric Payne | +| [YARN-1529](https://issues.apache.org/jira/browse/YARN-1529) | Add Localization overhead metrics to NM | Major | nodemanager | Gera Shegalov | Jim Brennan | +| [YARN-10251](https://issues.apache.org/jira/browse/YARN-10251) | Show extended resources on legacy RM UI. | Major | . | Eric Payne | Eric Payne | +| [HADOOP-17159](https://issues.apache.org/jira/browse/HADOOP-17159) | Make UGI support forceful relogin from keytab ignoring the last login time | Major | security | Sandeep Guggilam | Sandeep Guggilam | +| [YARN-10353](https://issues.apache.org/jira/browse/YARN-10353) | Log vcores used and cumulative cpu in containers monitor | Minor | yarn | Jim Brennan | Jim Brennan | +| [YARN-10369](https://issues.apache.org/jira/browse/YARN-10369) | Make NMTokenSecretManagerInRM sending NMToken for nodeId DEBUG | Minor | yarn | Jim Brennan | Jim Brennan | +| [YARN-10390](https://issues.apache.org/jira/browse/YARN-10390) | LeafQueue: retain user limits cache across assignContainers() calls | Major | capacity scheduler, capacityscheduler | Muhammad Samir Khan | Muhammad Samir Khan | +| [HDFS-15574](https://issues.apache.org/jira/browse/HDFS-15574) | Remove unnecessary sort of block list in DirectoryScanner | Major | . | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-15583](https://issues.apache.org/jira/browse/HDFS-15583) | Backport DirectoryScanner improvements HDFS-14476, HDFS-14751 and HDFS-15048 to branch 3.2 and 3.1 | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-15581](https://issues.apache.org/jira/browse/HDFS-15581) | Access Controlled HTTPFS Proxy | Minor | httpfs | Richard | Richard | +| [HDFS-15415](https://issues.apache.org/jira/browse/HDFS-15415) | Reduce locking in Datanode DirectoryScanner | Major | datanode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-17287](https://issues.apache.org/jira/browse/HADOOP-17287) | Support new Instance by non default constructor by ReflectionUtils | Major | . | Baolong Mao | Baolong Mao | +| [YARN-10451](https://issues.apache.org/jira/browse/YARN-10451) | RM (v1) UI NodesPage can NPE when yarn.io/gpu resource type is defined. | Major | . | Eric Payne | Eric Payne | +| [YARN-9667](https://issues.apache.org/jira/browse/YARN-9667) | Container-executor.c duplicates messages to stdout | Major | nodemanager, yarn | Adam Antal | Peter Bacsko | +| [MAPREDUCE-7301](https://issues.apache.org/jira/browse/MAPREDUCE-7301) | Expose Mini MR Cluster attribute for testing | Minor | test | Swaroopa Kadam | Swaroopa Kadam | +| [HDFS-15567](https://issues.apache.org/jira/browse/HDFS-15567) | [SBN Read] HDFS should expose msync() API to allow downstream applications call it explicitly. 
| Major | ha, hdfs-client | Konstantin Shvachko | Konstantin Shvachko | +| [YARN-10450](https://issues.apache.org/jira/browse/YARN-10450) | Add cpu and memory utilization per node and cluster-wide metrics | Minor | yarn | Jim Brennan | Jim Brennan | +| [YARN-10475](https://issues.apache.org/jira/browse/YARN-10475) | Scale RM-NM heartbeat interval based on node utilization | Minor | yarn | Jim Brennan | Jim Brennan | +| [HDFS-15665](https://issues.apache.org/jira/browse/HDFS-15665) | Balancer logging improvement | Major | balancer & mover | Konstantin Shvachko | Konstantin Shvachko | +| [HADOOP-17342](https://issues.apache.org/jira/browse/HADOOP-17342) | Creating a token identifier should not do kerberos name resolution | Major | common | Jim Brennan | Jim Brennan | +| [YARN-10479](https://issues.apache.org/jira/browse/YARN-10479) | RMProxy should retry on SocketTimeout Exceptions | Major | yarn | Jim Brennan | Jim Brennan | +| [HDFS-15623](https://issues.apache.org/jira/browse/HDFS-15623) | Respect configured values of rpc.engine | Major | hdfs | Hector Sandoval Chaverri | Hector Sandoval Chaverri | +| [HDFS-14395](https://issues.apache.org/jira/browse/HDFS-14395) | Remove WARN Logging From Interrupts in DataStreamer | Minor | hdfs-client | David Mollitor | David Mollitor | +| [HADOOP-17367](https://issues.apache.org/jira/browse/HADOOP-17367) | Add InetAddress api to ProxyUsers.authorize | Major | performance, security | Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7304](https://issues.apache.org/jira/browse/MAPREDUCE-7304) | Enhance the map-reduce Job end notifier to be able to notify the given URL via a custom class | Major | mrv2 | Daniel Fritsi | Zoltán Erdmann | +| [MAPREDUCE-7309](https://issues.apache.org/jira/browse/MAPREDUCE-7309) | Improve performance of reading resource request for mapper/reducers from config | Major | applicationmaster | Wangda Tan | Peter Bacsko | +| [HADOOP-17389](https://issues.apache.org/jira/browse/HADOOP-17389) | KMS should log full UGI principal | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15717](https://issues.apache.org/jira/browse/HDFS-15717) | Improve fsck logging | Major | logging, namenode | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15751](https://issues.apache.org/jira/browse/HDFS-15751) | Add documentation for msync() API to filesystem.md | Major | documentation | Konstantin Shvachko | Konstantin Shvachko | + + +### BUG FIXES: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-15418](https://issues.apache.org/jira/browse/HADOOP-15418) | Hadoop KMSAuthenticationFilter needs to use getPropsByPrefix instead of iterator to avoid ConcurrentModificationException | Major | common | Suma Shivaprasad | Suma Shivaprasad | +| [HDFS-14004](https://issues.apache.org/jira/browse/HDFS-14004) | TestLeaseRecovery2#testCloseWhileRecoverLease fails intermittently in trunk | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-13959](https://issues.apache.org/jira/browse/HDFS-13959) | TestUpgradeDomainBlockPlacementPolicy is flaky | Major | . | Ayush Saxena | Ayush Saxena | +| [YARN-8948](https://issues.apache.org/jira/browse/YARN-8948) | PlacementRule interface should be for all YarnSchedulers | Major | . 
| Bibin Chundatt | Bibin Chundatt | +| [HADOOP-16013](https://issues.apache.org/jira/browse/HADOOP-16013) | DecayRpcScheduler decay thread should run as a daemon | Major | ipc | Erik Krogen | Erik Krogen | +| [HDFS-14175](https://issues.apache.org/jira/browse/HDFS-14175) | EC: Native XOR decoder should reset the output buffer before using it. | Major | ec, hdfs | Surendra Singh Lilhore | Ayush Saxena | +| [HDFS-14202](https://issues.apache.org/jira/browse/HDFS-14202) | "dfs.disk.balancer.max.disk.throughputInMBperSec" property is not working as per set value. | Major | diskbalancer | Ranith Sardar | Ranith Sardar | +| [HADOOP-16127](https://issues.apache.org/jira/browse/HADOOP-16127) | In ipc.Client, put a new connection could happen after stop | Major | ipc | Tsz-wo Sze | Tsz-wo Sze | +| [YARN-4901](https://issues.apache.org/jira/browse/YARN-4901) | QueueMetrics needs to be cleared before MockRM is initialized | Major | scheduler | Daniel Templeton | Peter Bacsko | +| [HADOOP-16161](https://issues.apache.org/jira/browse/HADOOP-16161) | NetworkTopology#getWeightUsingNetworkLocation return unexpected result | Major | net | Xiaoqiao He | Xiaoqiao He | +| [HDFS-14434](https://issues.apache.org/jira/browse/HDFS-14434) | webhdfs that connect secure hdfs should not use user.name parameter | Minor | webhdfs | KWON BYUNGCHANG | KWON BYUNGCHANG | +| [HDFS-14527](https://issues.apache.org/jira/browse/HDFS-14527) | Stop all DataNodes may result in NN terminate | Major | namenode | Xiaoqiao He | Xiaoqiao He | +| [HDFS-14494](https://issues.apache.org/jira/browse/HDFS-14494) | Move Server logging of StatedId inside receiveRequestState() | Major | . | Konstantin Shvachko | Shweta | +| [HDFS-14599](https://issues.apache.org/jira/browse/HDFS-14599) | HDFS-12487 breaks test TestDiskBalancer.testDiskBalancerWithFedClusterWithOneNameServiceEmpty | Major | diskbalancer | Wei-Chiu Chuang | Xiaoqiao He | +| [HDFS-14618](https://issues.apache.org/jira/browse/HDFS-14618) | Incorrect synchronization of ArrayList field (ArrayList is thread-unsafe). | Critical | . | Paul Ward | Paul Ward | +| [HDFS-14610](https://issues.apache.org/jira/browse/HDFS-14610) | HashMap is not thread safe. Field storageMap is typically synchronized by storageMap. However, in one place, field storageMap is not protected with synchronized. | Critical | . | Paul Ward | Paul Ward | +| [HDFS-14499](https://issues.apache.org/jira/browse/HDFS-14499) | Misleading REM\_QUOTA value with snapshot and trash feature enabled for a directory | Major | snapshots | Shashikant Banerjee | Shashikant Banerjee | +| [HADOOP-16451](https://issues.apache.org/jira/browse/HADOOP-16451) | Update jackson-databind to 2.9.9.1 | Major | . | Wei-Chiu Chuang | Siyao Meng | +| [HDFS-14647](https://issues.apache.org/jira/browse/HDFS-14647) | NPE during secure namenode startup | Major | hdfs | Fengnan Li | Fengnan Li | +| [HADOOP-16461](https://issues.apache.org/jira/browse/HADOOP-16461) | Regression: FileSystem cache lock parses XML within the lock | Major | fs | Gopal Vijayaraghavan | Gopal Vijayaraghavan | +| [HDFS-14660](https://issues.apache.org/jira/browse/HDFS-14660) | [SBN Read] ObserverNameNode should throw StandbyException for requests not from ObserverProxyProvider | Major | . 
| Chao Sun | Chao Sun | +| [HADOOP-16460](https://issues.apache.org/jira/browse/HADOOP-16460) | ABFS: fix for Sever Name Indication (SNI) | Major | fs/azure | Thomas Marqardt | Sneha Vijayarajan | +| [HDFS-14569](https://issues.apache.org/jira/browse/HDFS-14569) | Result of crypto -listZones is not formatted properly | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HADOOP-12282](https://issues.apache.org/jira/browse/HADOOP-12282) | Connection thread's name should be updated after address changing is detected | Major | ipc | zhouyingchao | Lisheng Sun | +| [HDFS-14686](https://issues.apache.org/jira/browse/HDFS-14686) | HttpFS: HttpFSFileSystem#getErasureCodingPolicy always returns null | Major | httpfs | Siyao Meng | Siyao Meng | +| [HADOOP-15865](https://issues.apache.org/jira/browse/HADOOP-15865) | ConcurrentModificationException in Configuration.overlay() method | Major | . | Oleksandr Shevchenko | Oleksandr Shevchenko | +| [HADOOP-16487](https://issues.apache.org/jira/browse/HADOOP-16487) | Update jackson-databind to 2.9.9.2 | Critical | . | Siyao Meng | Siyao Meng | +| [HDFS-14759](https://issues.apache.org/jira/browse/HDFS-14759) | HDFS cat logs an info message | Major | . | Eric Badger | Eric Badger | +| [HADOOP-16533](https://issues.apache.org/jira/browse/HADOOP-16533) | Update jackson-databind to 2.9.9.3 | Major | . | Akira Ajisaka | Akira Ajisaka | +| [HDFS-14699](https://issues.apache.org/jira/browse/HDFS-14699) | Erasure Coding: Storage not considered in live replica when replication streams hard limit reached to threshold | Critical | ec | Zhao Yi Ming | Zhao Yi Ming | +| [YARN-9833](https://issues.apache.org/jira/browse/YARN-9833) | Race condition when DirectoryCollection.checkDirs() runs during container launch | Major | . | Peter Bacsko | Peter Bacsko | +| [YARN-9837](https://issues.apache.org/jira/browse/YARN-9837) | YARN Service fails to fetch status for Stopped apps with bigger spec files | Major | yarn-native-services | Tarun Parimi | Tarun Parimi | +| [YARN-2255](https://issues.apache.org/jira/browse/YARN-2255) | YARN Audit logging not added to log4j.properties | Major | . | Varun Saxena | Aihua Xu | +| [HDFS-14836](https://issues.apache.org/jira/browse/HDFS-14836) | FileIoProvider should not increase FileIoErrors metric in datanode volume metric | Minor | . | Aiphago | Aiphago | +| [HADOOP-16582](https://issues.apache.org/jira/browse/HADOOP-16582) | LocalFileSystem's mkdirs() does not work as expected under viewfs. | Major | . | Kihwal Lee | Kihwal Lee | +| [HADOOP-16581](https://issues.apache.org/jira/browse/HADOOP-16581) | ValueQueue does not trigger an async refill when number of values falls below watermark | Major | common, kms | Yuval Degani | Yuval Degani | +| [HDFS-14853](https://issues.apache.org/jira/browse/HDFS-14853) | NPE in DFSNetworkTopology#chooseRandomWithStorageType() when the excludedNode is not present | Major | . 
| Ranith Sardar | Ranith Sardar | +| [HDFS-13660](https://issues.apache.org/jira/browse/HDFS-13660) | DistCp job fails when new data is appended in the file while the distCp copy job is running | Critical | distcp | Mukund Thakur | Mukund Thakur | +| [HDFS-14808](https://issues.apache.org/jira/browse/HDFS-14808) | EC: Improper size values for corrupt ec block in LOG | Major | ec | Harshakiran Reddy | Ayush Saxena | +| [HDFS-14849](https://issues.apache.org/jira/browse/HDFS-14849) | Erasure Coding: the internal block is replicated many times when datanode is decommissioning | Major | ec, erasure-coding | HuangTao | HuangTao | +| [YARN-9858](https://issues.apache.org/jira/browse/YARN-9858) | Optimize RMContext getExclusiveEnforcedPartitions | Major | . | Jonathan Hung | Jonathan Hung | +| [HDFS-14492](https://issues.apache.org/jira/browse/HDFS-14492) | Snapshot memory leak | Major | snapshots | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-14418](https://issues.apache.org/jira/browse/HDFS-14418) | Remove redundant super user priveledge checks from namenode. | Major | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-16619](https://issues.apache.org/jira/browse/HADOOP-16619) | Upgrade jackson and jackson-databind to 2.9.10 | Major | . | Siyao Meng | Siyao Meng | +| [HDFS-14637](https://issues.apache.org/jira/browse/HDFS-14637) | Namenode may not replicate blocks to meet the policy after enabling upgradeDomain | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HDFS-14879](https://issues.apache.org/jira/browse/HDFS-14879) | Header was wrong in Snapshot web UI | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HDFS-14655](https://issues.apache.org/jira/browse/HDFS-14655) | [SBN Read] Namenode crashes if one of The JN is down | Critical | . | Harshakiran Reddy | Ayush Saxena | +| [HDFS-14859](https://issues.apache.org/jira/browse/HDFS-14859) | Prevent unnecessary evaluation of costly operation getNumLiveDataNodes when dfs.namenode.safemode.min.datanodes is not zero | Major | hdfs | Srinivasu Majeti | Srinivasu Majeti | +| [YARN-6715](https://issues.apache.org/jira/browse/YARN-6715) | Fix documentation about NodeHealthScriptRunner | Major | documentation, nodemanager | Peter Bacsko | Peter Bacsko | +| [YARN-9552](https://issues.apache.org/jira/browse/YARN-9552) | FairScheduler: NODE\_UPDATE can cause NoSuchElementException | Major | fairscheduler | Peter Bacsko | Peter Bacsko | +| [HDFS-14754](https://issues.apache.org/jira/browse/HDFS-14754) | Erasure Coding : The number of Under-Replicated Blocks never reduced | Critical | ec | Hemanth Boyina | Hemanth Boyina | +| [HDFS-14245](https://issues.apache.org/jira/browse/HDFS-14245) | Class cast error in GetGroups with ObserverReadProxyProvider | Major | . | Shen Yinjie | Erik Krogen | +| [HDFS-14373](https://issues.apache.org/jira/browse/HDFS-14373) | EC : Decoding is failing when block group last incomplete cell fall in to AlignedStripe | Critical | ec, hdfs-client | Surendra Singh Lilhore | Surendra Singh Lilhore | +| [HDFS-14509](https://issues.apache.org/jira/browse/HDFS-14509) | DN throws InvalidToken due to inequality of password when upgrade NN 2.x to 3.x | Blocker | . | Yuxuan Wang | Yuxuan Wang | +| [HDFS-14886](https://issues.apache.org/jira/browse/HDFS-14886) | In NameNode Web UI's Startup Progress page, Loading edits always shows 0 sec | Major | . 
| Hemanth Boyina | Hemanth Boyina | +| [YARN-8453](https://issues.apache.org/jira/browse/YARN-8453) | Additional Unit tests to verify queue limit and max-limit with multiple resource types | Major | capacity scheduler | Sunil G | Adam Antal | +| [HDFS-14890](https://issues.apache.org/jira/browse/HDFS-14890) | Setting permissions on name directory fails on non posix compliant filesystems | Blocker | . | hirik | Siddharth Wagle | +| [HADOOP-16580](https://issues.apache.org/jira/browse/HADOOP-16580) | Disable retry of FailoverOnNetworkExceptionRetry in case of AccessControlException | Major | common | Adam Antal | Adam Antal | +| [HDFS-14909](https://issues.apache.org/jira/browse/HDFS-14909) | DFSNetworkTopology#chooseRandomWithStorageType() should not decrease storage count for excluded node which is already part of excluded scope | Major | namenode | Surendra Singh Lilhore | Surendra Singh Lilhore | +| [HADOOP-16662](https://issues.apache.org/jira/browse/HADOOP-16662) | Remove unnecessary InnerNode check in NetworkTopology#add() | Minor | . | Lisheng Sun | Lisheng Sun | +| [HDFS-14847](https://issues.apache.org/jira/browse/HDFS-14847) | Erasure Coding: Blocks are over-replicated while EC decommissioning | Critical | ec | Hui Fei | Hui Fei | +| [HDFS-14913](https://issues.apache.org/jira/browse/HDFS-14913) | Correct the value of available count in DFSNetworkTopology#chooseRandomWithStorageType() | Major | . | Ayush Saxena | Ayush Saxena | +| [YARN-9915](https://issues.apache.org/jira/browse/YARN-9915) | Fix FindBug issue in QueueMetrics | Minor | . | Prabhu Joseph | Prabhu Joseph | +| [HDFS-12749](https://issues.apache.org/jira/browse/HDFS-12749) | DN may not send block report to NN after NN restart | Major | datanode | TanYuxin | Xiaoqiao He | +| [HDFS-13901](https://issues.apache.org/jira/browse/HDFS-13901) | INode access time is ignored because of race between open and rename | Major | . | Jinglun | Jinglun | +| [HDFS-14910](https://issues.apache.org/jira/browse/HDFS-14910) | Rename Snapshot with Pre Descendants Fail With IllegalArgumentException. | Blocker | . | Íñigo Goiri | Wei-Chiu Chuang | +| [HDFS-14308](https://issues.apache.org/jira/browse/HDFS-14308) | DFSStripedInputStream curStripeBuf is not freed by unbuffer() | Major | ec | Joe McDonnell | Zhao Yi Ming | +| [HDFS-14931](https://issues.apache.org/jira/browse/HDFS-14931) | hdfs crypto commands limit column width | Major | . | Eric Badger | Eric Badger | +| [HADOOP-16669](https://issues.apache.org/jira/browse/HADOOP-16669) | TestRawLocalFileSystemContract.testPermission fails if no native library | Minor | common, test | Steve Loughran | Steve Loughran | +| [HDFS-14920](https://issues.apache.org/jira/browse/HDFS-14920) | Erasure Coding: Decommission may hang If one or more datanodes are out of service during decommission | Major | ec | Hui Fei | Hui Fei | +| [HDFS-13736](https://issues.apache.org/jira/browse/HDFS-13736) | BlockPlacementPolicyDefault can not choose favored nodes when 'dfs.namenode.block-placement-policy.default.prefer-local-node' set to false | Major | . 
| hu xiaodong | hu xiaodong | +| [HDFS-14925](https://issues.apache.org/jira/browse/HDFS-14925) | rename operation should check nest snapshot | Major | namenode | Junwang Zhao | Junwang Zhao | +| [YARN-9949](https://issues.apache.org/jira/browse/YARN-9949) | Add missing queue configs for root queue in RMWebService#CapacitySchedulerInfo | Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [HDFS-14945](https://issues.apache.org/jira/browse/HDFS-14945) | Revise PacketResponder's log. | Minor | datanode | Xudong Cao | Xudong Cao | +| [HDFS-14946](https://issues.apache.org/jira/browse/HDFS-14946) | Erasure Coding: Block recovery failed during decommissioning | Major | . | Hui Fei | Hui Fei | +| [HDFS-14384](https://issues.apache.org/jira/browse/HDFS-14384) | When lastLocatedBlock token expire, it will take 1~3s second to refetch it. | Major | hdfs-client | Surendra Singh Lilhore | Surendra Singh Lilhore | +| [HDFS-14806](https://issues.apache.org/jira/browse/HDFS-14806) | Bootstrap standby may fail if used in-progress tailing | Major | namenode | Chen Liang | Chen Liang | +| [HDFS-14941](https://issues.apache.org/jira/browse/HDFS-14941) | Potential editlog race condition can cause corrupted file | Major | namenode | Chen Liang | Chen Liang | +| [HDFS-14958](https://issues.apache.org/jira/browse/HDFS-14958) | TestBalancerWithNodeGroup is not using NetworkTopologyWithNodeGroup | Minor | hdfs | Jim Brennan | Jim Brennan | +| [HDFS-14720](https://issues.apache.org/jira/browse/HDFS-14720) | DataNode shouldn't report block as bad block if the block length is Long.MAX\_VALUE. | Major | datanode | Surendra Singh Lilhore | Hemanth Boyina | +| [HADOOP-16676](https://issues.apache.org/jira/browse/HADOOP-16676) | Backport HADOOP-16152 to branch-3.2 | Major | common | DW | Siyao Meng | +| [HADOOP-16677](https://issues.apache.org/jira/browse/HADOOP-16677) | Recalculate the remaining timeout millis correctly while throwing an InterupptedException in SocketIOWithTimeout. | Minor | common | Xudong Cao | Xudong Cao | +| [HDFS-14884](https://issues.apache.org/jira/browse/HDFS-14884) | Add sanity check that zone key equals feinfo key while setting Xattrs | Major | encryption, hdfs | Mukul Kumar Singh | Mukul Kumar Singh | +| [HADOOP-15097](https://issues.apache.org/jira/browse/HADOOP-15097) | AbstractContractDeleteTest::testDeleteNonEmptyDirRecursive with misleading path | Minor | fs, test | zhoutai.zt | Xieming Li | +| [HADOOP-16710](https://issues.apache.org/jira/browse/HADOOP-16710) | testing\_azure.md documentation is misleading | Major | fs/azure, test | Andras Bokor | Andras Bokor | +| [YARN-9984](https://issues.apache.org/jira/browse/YARN-9984) | FSPreemptionThread can cause NullPointerException while app is unregistered with containers running on a node | Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg | +| [YARN-9983](https://issues.apache.org/jira/browse/YARN-9983) | Typo in YARN Service overview documentation | Trivial | documentation | Denes Gerencser | Denes Gerencser | +| [HADOOP-16719](https://issues.apache.org/jira/browse/HADOOP-16719) | Remove the disallowed element config within maven-checkstyle-plugin | Major | . | Wanqiang Ji | Wanqiang Ji | +| [HADOOP-16700](https://issues.apache.org/jira/browse/HADOOP-16700) | RpcQueueTime may be negative when the response has to be sent later | Minor | . 
| xuzq | xuzq | +| [HADOOP-15686](https://issues.apache.org/jira/browse/HADOOP-15686) | Supress bogus AbstractWadlGeneratorGrammarGenerator in KMS stderr | Major | kms | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-14940](https://issues.apache.org/jira/browse/HDFS-14940) | HDFS Balancer : Do not allow to set balancer maximum network bandwidth more than 1TB | Minor | balancer & mover | Souryakanta Dwivedy | Hemanth Boyina | +| [YARN-9838](https://issues.apache.org/jira/browse/YARN-9838) | Fix resource inconsistency for queues when moving app with reserved container to another queue | Critical | capacity scheduler | jiulongzhu | jiulongzhu | +| [YARN-9968](https://issues.apache.org/jira/browse/YARN-9968) | Public Localizer is exiting in NodeManager due to NullPointerException | Major | nodemanager | Tarun Parimi | Tarun Parimi | +| [YARN-9011](https://issues.apache.org/jira/browse/YARN-9011) | Race condition during decommissioning | Major | nodemanager | Peter Bacsko | Peter Bacsko | +| [HDFS-14973](https://issues.apache.org/jira/browse/HDFS-14973) | Balancer getBlocks RPC dispersal does not function properly | Major | balancer & mover | Erik Krogen | Erik Krogen | +| [HADOOP-16685](https://issues.apache.org/jira/browse/HADOOP-16685) | FileSystem#listStatusIterator does not check if given path exists | Major | fs | Sahil Takiar | Sahil Takiar | +| [MAPREDUCE-7240](https://issues.apache.org/jira/browse/MAPREDUCE-7240) | Exception ' Invalid event: TA\_TOO\_MANY\_FETCH\_FAILURE at SUCCESS\_FINISHING\_CONTAINER' cause job error | Critical | . | luhuachao | luhuachao | +| [MAPREDUCE-7249](https://issues.apache.org/jira/browse/MAPREDUCE-7249) | Invalid event TA\_TOO\_MANY\_FETCH\_FAILURE at SUCCESS\_CONTAINER\_CLEANUP causes job failure | Critical | applicationmaster, mrv2 | Wilfred Spiegelenburg | Wilfred Spiegelenburg | +| [YARN-9993](https://issues.apache.org/jira/browse/YARN-9993) | Remove incorrectly committed files from YARN-9011 | Major | yarn | Wilfred Spiegelenburg | Wilfred Spiegelenburg | +| [HDFS-15010](https://issues.apache.org/jira/browse/HDFS-15010) | BlockPoolSlice#addReplicaThreadPool static pool should be initialized by static method | Major | datanode | Surendra Singh Lilhore | Surendra Singh Lilhore | +| [HADOOP-16744](https://issues.apache.org/jira/browse/HADOOP-16744) | Fix building instruction to enable zstd | Minor | documentation | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-9985](https://issues.apache.org/jira/browse/YARN-9985) | Unsupported "transitionToObserver" option displaying for rmadmin command | Minor | RM, yarn | Souryakanta Dwivedy | Ayush Saxena | +| [HADOOP-16754](https://issues.apache.org/jira/browse/HADOOP-16754) | Fix docker failed to build yetus/hadoop | Blocker | build | Kevin Su | Kevin Su | +| [HDFS-15032](https://issues.apache.org/jira/browse/HDFS-15032) | Balancer crashes when it fails to contact an unavailable NN via ObserverReadProxyProvider | Major | balancer & mover | Erik Krogen | Erik Krogen | +| [HDFS-15036](https://issues.apache.org/jira/browse/HDFS-15036) | Active NameNode should not silently fail the image transfer | Major | namenode | Konstantin Shvachko | Chen Liang | +| [HDFS-14519](https://issues.apache.org/jira/browse/HDFS-14519) | NameQuota is not update after concat operation, so namequota is wrong | Major | . 
| Ranith Sardar | Ranith Sardar | +| [YARN-10055](https://issues.apache.org/jira/browse/YARN-10055) | bower install fails | Blocker | build, yarn-ui-v2 | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15076](https://issues.apache.org/jira/browse/HDFS-15076) | Fix tests that hold FSDirectory lock, without holding FSNamesystem lock. | Major | test | Konstantin Shvachko | Konstantin Shvachko | +| [HDFS-15073](https://issues.apache.org/jira/browse/HDFS-15073) | Replace curator-shaded guava import with the standard one | Minor | hdfs-client | Akira Ajisaka | Chandra Sanivarapu | +| [HADOOP-16042](https://issues.apache.org/jira/browse/HADOOP-16042) | Update the link to HadoopJavaVersion | Minor | documentation | Akira Ajisaka | Chandra Sanivarapu | +| [HDFS-14934](https://issues.apache.org/jira/browse/HDFS-14934) | [SBN Read] Standby NN throws many InterruptedExceptions when dfs.ha.tail-edits.period is 0 | Major | . | Takanobu Asanuma | Ayush Saxena | +| [YARN-10053](https://issues.apache.org/jira/browse/YARN-10053) | Placement rules do not use correct group service init | Major | yarn | Wilfred Spiegelenburg | Wilfred Spiegelenburg | +| [HDFS-15068](https://issues.apache.org/jira/browse/HDFS-15068) | DataNode could meet deadlock if invoke refreshVolumes when register | Major | datanode | Xiaoqiao He | Aiphago | +| [MAPREDUCE-7255](https://issues.apache.org/jira/browse/MAPREDUCE-7255) | Fix typo in MapReduce documentaion example | Trivial | documentation | Sergey Pogorelov | Sergey Pogorelov | +| [HDFS-15072](https://issues.apache.org/jira/browse/HDFS-15072) | HDFS MiniCluster fails to start when run in directory path with a % | Minor | . | Geoffrey Jacoby | Masatake Iwasaki | +| [HDFS-15077](https://issues.apache.org/jira/browse/HDFS-15077) | Fix intermittent failure of TestDFSClientRetries#testLeaseRenewSocketTimeout | Minor | test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15080](https://issues.apache.org/jira/browse/HDFS-15080) | Fix the issue in reading persistent memory cached data with an offset | Major | caching, datanode | Feilong He | Feilong He | +| [YARN-7387](https://issues.apache.org/jira/browse/YARN-7387) | org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestIncreaseAllocationExpirer fails intermittently | Major | . | Miklos Szegedi | Jim Brennan | +| [YARN-8672](https://issues.apache.org/jira/browse/YARN-8672) | TestContainerManager#testLocalingResourceWhileContainerRunning occasionally times out | Major | nodemanager | Jason Darrell Lowe | Chandni Singh | +| [HDFS-14957](https://issues.apache.org/jira/browse/HDFS-14957) | INodeReference Space Consumed was not same in QuotaUsage and ContentSummary | Major | namenode | Hemanth Boyina | Hemanth Boyina | +| [MAPREDUCE-7252](https://issues.apache.org/jira/browse/MAPREDUCE-7252) | Handling 0 progress in SimpleExponential task runtime estimator | Minor | . 
| Ahmed Hussein | Ahmed Hussein | +| [HADOOP-16749](https://issues.apache.org/jira/browse/HADOOP-16749) | Configuration parsing of CDATA values are blank | Major | conf | Jonathan Turner Eagles | Daryn Sharp | +| [HDFS-15095](https://issues.apache.org/jira/browse/HDFS-15095) | Fix accidental comment in flaky test TestDecommissioningStatus | Major | hdfs | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15099](https://issues.apache.org/jira/browse/HDFS-15099) | [SBN Read] checkOperation(WRITE) should throw ObserverRetryOnActiveException on ObserverNode | Major | namenode | Konstantin Shvachko | Chen Liang | +| [HDFS-14578](https://issues.apache.org/jira/browse/HDFS-14578) | AvailableSpaceBlockPlacementPolicy always prefers local node | Major | block placement | Wei-Chiu Chuang | Ayush Saxena | +| [HADOOP-16683](https://issues.apache.org/jira/browse/HADOOP-16683) | Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException | Major | common | Adam Antal | Adam Antal | +| [MAPREDUCE-7256](https://issues.apache.org/jira/browse/MAPREDUCE-7256) | Fix javadoc error in SimpleExponentialSmoothing | Minor | documentation | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-8373](https://issues.apache.org/jira/browse/YARN-8373) | RM Received RMFatalEvent of type CRITICAL\_THREAD\_CRASH | Major | fairscheduler, resourcemanager | Girish Bhat | Wilfred Spiegelenburg | +| [MAPREDUCE-7247](https://issues.apache.org/jira/browse/MAPREDUCE-7247) | Modify HistoryServerRest.html content,change The job attempt id‘s datatype from string to int | Major | documentation | zhaoshengjie | zhaoshengjie | +| [YARN-9970](https://issues.apache.org/jira/browse/YARN-9970) | Refactor TestUserGroupMappingPlacementRule#verifyQueueMapping | Major | . | Manikandan R | Manikandan R | +| [YARN-8148](https://issues.apache.org/jira/browse/YARN-8148) | Update decimal values for queue capacities shown on queue status CLI | Major | client | Prabhu Joseph | Prabhu Joseph | +| [HADOOP-16808](https://issues.apache.org/jira/browse/HADOOP-16808) | Use forkCount and reuseForks parameters instead of forkMode in the config of maven surefire plugin | Minor | build | Akira Ajisaka | Xieming Li | +| [HADOOP-16793](https://issues.apache.org/jira/browse/HADOOP-16793) | Remove WARN log when ipc connection interrupted in Client#handleSaslConnectionFailure() | Minor | . | Lisheng Sun | Lisheng Sun | +| [YARN-9462](https://issues.apache.org/jira/browse/YARN-9462) | TestResourceTrackerService.testNodeRemovalGracefully fails sporadically | Minor | resourcemanager, test | Prabhu Joseph | Prabhu Joseph | +| [YARN-9790](https://issues.apache.org/jira/browse/YARN-9790) | Failed to set default-application-lifetime if maximum-application-lifetime is less than or equal to zero | Major | . | kyungwan nam | kyungwan nam | +| [HDFS-14993](https://issues.apache.org/jira/browse/HDFS-14993) | checkDiskError doesn't work during datanode startup | Major | datanode | Yang Yun | Yang Yun | +| [HDFS-13179](https://issues.apache.org/jira/browse/HDFS-13179) | TestLazyPersistReplicaRecovery#testDnRestartWithSavedReplicas fails intermittently | Critical | fs | Gabor Bota | Ahmed Hussein | +| [MAPREDUCE-7259](https://issues.apache.org/jira/browse/MAPREDUCE-7259) | testSpeculateSuccessfulWithUpdateEvents fails Intermittently | Minor | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15146](https://issues.apache.org/jira/browse/HDFS-15146) | TestBalancerRPCDelay.testBalancerRPCDelay fails intermittently | Minor | . 
| Ahmed Hussein | Ahmed Hussein | +| [MAPREDUCE-7079](https://issues.apache.org/jira/browse/MAPREDUCE-7079) | JobHistory#ServiceStop implementation is incorrect | Major | . | Jason Darrell Lowe | Ahmed Hussein | +| [HDFS-15118](https://issues.apache.org/jira/browse/HDFS-15118) | [SBN Read] Slow clients when Observer reads are enabled but there are no Observers on the cluster. | Major | hdfs-client | Konstantin Shvachko | Chen Liang | +| [HDFS-7175](https://issues.apache.org/jira/browse/HDFS-7175) | Client-side SocketTimeoutException during Fsck | Major | namenode | Carl Steinbach | Stephen O'Donnell | +| [HDFS-15148](https://issues.apache.org/jira/browse/HDFS-15148) | dfs.namenode.send.qop.enabled should not apply to primary NN port | Major | . | Chen Liang | Chen Liang | +| [HADOOP-16410](https://issues.apache.org/jira/browse/HADOOP-16410) | Hadoop 3.2 azure jars incompatible with alpine 3.9 | Minor | fs/azure | Jose Luis Pedrosa | | +| [HDFS-15115](https://issues.apache.org/jira/browse/HDFS-15115) | Namenode crash caused by NPE in BlockPlacementPolicyDefault when dynamically change logger to debug | Major | . | wangzhixiang | wangzhixiang | +| [HDFS-15158](https://issues.apache.org/jira/browse/HDFS-15158) | The number of failed volumes mismatch with volumeFailures of Datanode metrics | Minor | datanode | Yang Yun | Yang Yun | +| [HADOOP-16849](https://issues.apache.org/jira/browse/HADOOP-16849) | start-build-env.sh behaves incorrectly when username is numeric only | Minor | build | Jihyun Cho | Jihyun Cho | +| [HDFS-15161](https://issues.apache.org/jira/browse/HDFS-15161) | When evictableMmapped or evictable size is zero, do not throw NoSuchElementException in ShortCircuitCache#close() | Major | . | Lisheng Sun | Lisheng Sun | +| [HDFS-15164](https://issues.apache.org/jira/browse/HDFS-15164) | Fix TestDelegationTokensWithHA | Major | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-16868](https://issues.apache.org/jira/browse/HADOOP-16868) | ipc.Server readAndProcess threw NullPointerException | Major | rpc-server | Tsz-wo Sze | Tsz-wo Sze | +| [HADOOP-16869](https://issues.apache.org/jira/browse/HADOOP-16869) | Upgrade findbugs-maven-plugin to 3.0.5 to fix mvn findbugs:findbugs failure | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15052](https://issues.apache.org/jira/browse/HDFS-15052) | WebHDFS getTrashRoot leads to OOM due to FileSystem object creation | Major | webhdfs | Wei-Chiu Chuang | Masatake Iwasaki | +| [HDFS-15185](https://issues.apache.org/jira/browse/HDFS-15185) | StartupProgress reports edits segments until the entire startup completes | Major | namenode | Konstantin Shvachko | Konstantin Shvachko | +| [HDFS-15166](https://issues.apache.org/jira/browse/HDFS-15166) | Remove redundant field fStream in ByteStringLog | Major | . | Konstantin Shvachko | Xieming Li | +| [YARN-10143](https://issues.apache.org/jira/browse/YARN-10143) | YARN-10101 broke Yarn logs CLI | Blocker | yarn | Adam Antal | Adam Antal | +| [HADOOP-16841](https://issues.apache.org/jira/browse/HADOOP-16841) | The description of hadoop.http.authentication.signature.secret.file contains outdated information | Minor | documentation | Akira Ajisaka | Xieming Li | +| [YARN-10156](https://issues.apache.org/jira/browse/YARN-10156) | Fix typo 'complaint' which means quite different in Federation.md | Minor | documentation, federation | Sungpeo Kook | Sungpeo Kook | +| [HDFS-15147](https://issues.apache.org/jira/browse/HDFS-15147) | LazyPersistTestCase wait logic is error-prone | Minor | . 
| Ahmed Hussein | Ahmed Hussein | +| [HDFS-14668](https://issues.apache.org/jira/browse/HDFS-14668) | Support Fuse with Users from multiple Security Realms | Critical | fuse-dfs | Sailesh Patel | István Fajth | +| [HDFS-15111](https://issues.apache.org/jira/browse/HDFS-15111) | stopStandbyServices() should log which service state it is transitioning from. | Major | hdfs, logging | Konstantin Shvachko | Xieming Li | +| [HDFS-15199](https://issues.apache.org/jira/browse/HDFS-15199) | NPE in BlockSender | Major | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-16891](https://issues.apache.org/jira/browse/HADOOP-16891) | Upgrade jackson-databind to 2.9.10.3 | Blocker | . | Siyao Meng | Siyao Meng | +| [HDFS-15204](https://issues.apache.org/jira/browse/HDFS-15204) | TestRetryCacheWithHA testRemoveCacheDescriptor fails intermittently | Major | hdfs | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-16840](https://issues.apache.org/jira/browse/HADOOP-16840) | AliyunOSS: getFileStatus throws FileNotFoundException in versioning bucket | Major | fs/oss | wujinhu | wujinhu | +| [YARN-9419](https://issues.apache.org/jira/browse/YARN-9419) | Log a warning if GPU isolation is enabled but LinuxContainerExecutor is disabled | Major | . | Szilard Nemeth | Andras Gyori | +| [YARN-9427](https://issues.apache.org/jira/browse/YARN-9427) | TestContainerSchedulerQueuing.testKillOnlyRequiredOpportunisticContainers fails sporadically | Major | scheduler, test | Prabhu Joseph | Ahmed Hussein | +| [HDFS-15135](https://issues.apache.org/jira/browse/HDFS-15135) | EC : ArrayIndexOutOfBoundsException in BlockRecoveryWorker#RecoveryTaskStriped. | Major | erasure-coding | Surendra Singh Lilhore | Ravuri Sushma sree | +| [HDFS-14442](https://issues.apache.org/jira/browse/HDFS-14442) | Disagreement between HAUtil.getAddressOfActive and RpcInvocationHandler.getConnectionId | Major | . | Erik Krogen | Ravuri Sushma sree | +| [HDFS-15216](https://issues.apache.org/jira/browse/HDFS-15216) | Wrong Use Case of -showprogress in fsck | Major | . | Ravuri Sushma sree | Ravuri Sushma sree | +| [HDFS-15211](https://issues.apache.org/jira/browse/HDFS-15211) | EC: File write hangs during close in case of Exception during updatePipeline | Critical | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15208](https://issues.apache.org/jira/browse/HDFS-15208) | Suppress bogus AbstractWadlGeneratorGrammarGenerator in KMS stderr in hdfs | Trivial | . | Wei-Chiu Chuang | Wei-Chiu Chuang | +| [HDFS-15223](https://issues.apache.org/jira/browse/HDFS-15223) | FSCK fails if one namenode is not available | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15232](https://issues.apache.org/jira/browse/HDFS-15232) | Fix libhdfspp test failures with GCC 7 | Major | native, test | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15219](https://issues.apache.org/jira/browse/HDFS-15219) | DFS Client will stuck when ResponseProcessor.run throw Error | Major | hdfs-client | zhengchenyu | zhengchenyu | +| [HDFS-15191](https://issues.apache.org/jira/browse/HDFS-15191) | EOF when reading legacy buffer in BlockTokenIdentifier | Major | hdfs | Steven Rand | Steven Rand | +| [YARN-10202](https://issues.apache.org/jira/browse/YARN-10202) | Fix documentation about NodeAttributes. 
| Minor | documentation | Sen Zhao | Sen Zhao | +| [HADOOP-16949](https://issues.apache.org/jira/browse/HADOOP-16949) | pylint fails in the build environment | Critical | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-14836](https://issues.apache.org/jira/browse/HADOOP-14836) | Upgrade maven-clean-plugin to 3.1.0 | Major | build | Allen Wittenauer | Akira Ajisaka | +| [YARN-10207](https://issues.apache.org/jira/browse/YARN-10207) | CLOSE\_WAIT socket connection leaks during rendering of (corrupted) aggregated logs on the JobHistoryServer Web UI | Major | yarn | Siddharth Ahuja | Siddharth Ahuja | +| [HDFS-12862](https://issues.apache.org/jira/browse/HDFS-12862) | CacheDirective becomes invalid when NN restart or failover | Major | caching, hdfs | Wang XL | Wang XL | +| [MAPREDUCE-7272](https://issues.apache.org/jira/browse/MAPREDUCE-7272) | TaskAttemptListenerImpl excessive log messages | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15283](https://issues.apache.org/jira/browse/HDFS-15283) | Cache pool MAXTTL is not persisted and restored on cluster restart | Major | namenode | Stephen O'Donnell | Stephen O'Donnell | +| [HADOOP-16944](https://issues.apache.org/jira/browse/HADOOP-16944) | Use Yetus 0.12.0 in GitHub PR | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15276](https://issues.apache.org/jira/browse/HDFS-15276) | Concat on INodeRefernce fails with illegal state exception | Critical | . | Hemanth Boyina | Hemanth Boyina | +| [YARN-10223](https://issues.apache.org/jira/browse/YARN-10223) | Duplicate jersey-test-framework-core dependency in yarn-server-common | Minor | build | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15281](https://issues.apache.org/jira/browse/HDFS-15281) | ZKFC ignores dfs.namenode.rpc-bind-host and uses dfs.namenode.rpc-address to bind to host address | Major | ha, namenode | Dhiraj Hegde | Dhiraj Hegde | +| [HDFS-15297](https://issues.apache.org/jira/browse/HDFS-15297) | TestNNHandlesBlockReportPerStorage::blockReport\_02 fails intermittently in trunk | Major | datanode, test | Mingliang Liu | Ayush Saxena | +| [HADOOP-17014](https://issues.apache.org/jira/browse/HADOOP-17014) | Upgrade jackson-databind to 2.9.10.4 | Blocker | . | Siyao Meng | Siyao Meng | +| [YARN-9848](https://issues.apache.org/jira/browse/YARN-9848) | revert YARN-4946 | Blocker | log-aggregation, resourcemanager | Steven Rand | Steven Rand | +| [HDFS-15286](https://issues.apache.org/jira/browse/HDFS-15286) | Concat on a same files deleting the file | Critical | . | Hemanth Boyina | Hemanth Boyina | +| [YARN-10256](https://issues.apache.org/jira/browse/YARN-10256) | Refactor TestContainerSchedulerQueuing.testContainerUpdateExecTypeGuaranteedToOpportunistic | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15270](https://issues.apache.org/jira/browse/HDFS-15270) | Account for \*env == NULL in hdfsThreadDestructor | Major | . | Babneet Singh | Babneet Singh | +| [YARN-8959](https://issues.apache.org/jira/browse/YARN-8959) | TestContainerResizing fails randomly | Minor | . 
| Bibin Chundatt | Ahmed Hussein | +| [HDFS-15323](https://issues.apache.org/jira/browse/HDFS-15323) | StandbyNode fails transition to active due to insufficient transaction tailing | Major | namenode, qjm | Konstantin Shvachko | Konstantin Shvachko | +| [HADOOP-17025](https://issues.apache.org/jira/browse/HADOOP-17025) | Fix invalid metastore configuration in S3GuardTool tests | Minor | fs/s3, test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15339](https://issues.apache.org/jira/browse/HDFS-15339) | TestHDFSCLI fails for user names with the dot/dash character | Major | test | Yan Xiaole | Yan Xiaole | +| [HDFS-15250](https://issues.apache.org/jira/browse/HDFS-15250) | Setting \`dfs.client.use.datanode.hostname\` to true can crash the system because of unhandled UnresolvedAddressException | Major | . | Ctest | Ctest | +| [HDFS-14367](https://issues.apache.org/jira/browse/HDFS-14367) | EC: Parameter maxPoolSize in striped reconstruct thread pool isn't affecting number of threads | Major | ec | Guo Lei | Guo Lei | +| [HADOOP-15565](https://issues.apache.org/jira/browse/HADOOP-15565) | ViewFileSystem.close doesn't close child filesystems and causes FileSystem objects leak. | Major | . | Jinglun | Jinglun | +| [YARN-9444](https://issues.apache.org/jira/browse/YARN-9444) | YARN API ResourceUtils's getRequestedResourcesFromConfig doesn't recognize yarn.io/gpu as a valid resource | Minor | api | Gergely Pollak | Gergely Pollak | +| [HADOOP-17044](https://issues.apache.org/jira/browse/HADOOP-17044) | Revert "HADOOP-8143. Change distcp to have -pb on by default" | Major | tools/distcp | Steve Loughran | Steve Loughran | +| [HDFS-15293](https://issues.apache.org/jira/browse/HDFS-15293) | Relax the condition for accepting a fsimage when receiving a checkpoint | Critical | namenode | Chen Liang | Chen Liang | +| [HADOOP-17024](https://issues.apache.org/jira/browse/HADOOP-17024) | ListStatus on ViewFS root (ls "/") should list the linkFallBack root (configured target root). | Major | fs, viewfs | Uma Maheswara Rao G | Abhishek Das | +| [HADOOP-17040](https://issues.apache.org/jira/browse/HADOOP-17040) | Fix intermittent failure of ITestBlockingThreadPoolExecutorService | Minor | fs/s3, test | Masatake Iwasaki | Masatake Iwasaki | +| [HDFS-15363](https://issues.apache.org/jira/browse/HDFS-15363) | BlockPlacementPolicyWithNodeGroup should validate if it is initialized by NetworkTopologyWithNodeGroup | Major | . | Hemanth Boyina | Hemanth Boyina | +| [MAPREDUCE-7278](https://issues.apache.org/jira/browse/MAPREDUCE-7278) | Speculative execution behavior is observed even when mapreduce.map.speculative and mapreduce.reduce.speculative are false | Major | task | Tarun Parimi | Tarun Parimi | +| [HADOOP-7002](https://issues.apache.org/jira/browse/HADOOP-7002) | Wrong description of copyFromLocal and copyToLocal in documentation | Minor | . 
| Jingguo Yao | Andras Bokor | +| [HADOOP-17052](https://issues.apache.org/jira/browse/HADOOP-17052) | NetUtils.connect() throws unchecked exception (UnresolvedAddressException) causing clients to abort | Major | net | Dhiraj Hegde | Dhiraj Hegde | +| [HADOOP-17062](https://issues.apache.org/jira/browse/HADOOP-17062) | Fix shelldocs path in Jenkinsfile | Major | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17056](https://issues.apache.org/jira/browse/HADOOP-17056) | shelldoc fails in hadoop-common | Major | build | Akira Ajisaka | Akira Ajisaka | +| [YARN-10286](https://issues.apache.org/jira/browse/YARN-10286) | PendingContainers bugs in the scheduler outputs | Critical | . | Adam Antal | Andras Gyori | +| [HDFS-15396](https://issues.apache.org/jira/browse/HDFS-15396) | Fix TestViewFileSystemOverloadSchemeHdfsFileSystemContract#testListStatusRootDir | Major | . | Ayush Saxena | Ayush Saxena | +| [HDFS-15386](https://issues.apache.org/jira/browse/HDFS-15386) | ReplicaNotFoundException keeps happening in DN after removing multiple DN's data directories | Major | . | Toshihiro Suzuki | Toshihiro Suzuki | +| [YARN-10300](https://issues.apache.org/jira/browse/YARN-10300) | appMasterHost not set in RM ApplicationSummary when AM fails before first heartbeat | Major | . | Eric Badger | Eric Badger | +| [HADOOP-17059](https://issues.apache.org/jira/browse/HADOOP-17059) | ArrayIndexOfboundsException in ViewFileSystem#listStatus | Major | viewfs | Hemanth Boyina | Hemanth Boyina | +| [YARN-10296](https://issues.apache.org/jira/browse/YARN-10296) | Make ContainerPBImpl#getId/setId synchronized | Minor | . | Benjamin Teke | Benjamin Teke | +| [YARN-10295](https://issues.apache.org/jira/browse/YARN-10295) | CapacityScheduler NPE can cause apps to get stuck without resources | Major | capacityscheduler | Benjamin Teke | Benjamin Teke | +| [HADOOP-17060](https://issues.apache.org/jira/browse/HADOOP-17060) | listStatus and getFileStatus behave inconsistent in the case of ViewFs implementation for isDirectory | Major | viewfs | Srinivasu Majeti | Uma Maheswara Rao G | +| [YARN-10312](https://issues.apache.org/jira/browse/YARN-10312) | Add support for yarn logs -logFile to retain backward compatibility | Major | client | Jim Brennan | Jim Brennan | +| [HDFS-15403](https://issues.apache.org/jira/browse/HDFS-15403) | NPE in FileIoProvider#transferToSocketFully | Major | . | Hemanth Boyina | Hemanth Boyina | +| [HADOOP-17029](https://issues.apache.org/jira/browse/HADOOP-17029) | ViewFS does not return correct user/group and ACL | Major | fs, viewfs | Abhishek Das | Abhishek Das | +| [HDFS-15421](https://issues.apache.org/jira/browse/HDFS-15421) | IBR leak causes standby NN to be stuck in safe mode | Blocker | namenode | Kihwal Lee | Akira Ajisaka | +| [YARN-9903](https://issues.apache.org/jira/browse/YARN-9903) | Support reservations continue looking for Node Labels | Major | . 
| Tarun Parimi | Jim Brennan | +| [HADOOP-17032](https://issues.apache.org/jira/browse/HADOOP-17032) | Handle an internal dir in viewfs having multiple children mount points pointing to different filesystems | Major | fs, viewfs | Abhishek Das | Abhishek Das | +| [HDFS-15446](https://issues.apache.org/jira/browse/HDFS-15446) | CreateSnapshotOp fails during edit log loading for /.reserved/raw/path with error java.io.FileNotFoundException: Directory does not exist: /.reserved/raw/path | Major | hdfs | Srinivasu Majeti | Stephen O'Donnell | +| [HADOOP-17081](https://issues.apache.org/jira/browse/HADOOP-17081) | MetricsSystem doesn't start the sink adapters on restart | Minor | metrics | Madhusoodan | Madhusoodan | +| [HDFS-15451](https://issues.apache.org/jira/browse/HDFS-15451) | Restarting name node stuck in safe mode when using provided storage | Major | namenode | shanyu zhao | shanyu zhao | +| [HADOOP-17120](https://issues.apache.org/jira/browse/HADOOP-17120) | Fix failure of docker image creation due to pip2 install error | Major | . | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-10347](https://issues.apache.org/jira/browse/YARN-10347) | Fix double locking in CapacityScheduler#reinitialize in branch-3.1 | Critical | capacity scheduler | Masatake Iwasaki | Masatake Iwasaki | +| [YARN-10348](https://issues.apache.org/jira/browse/YARN-10348) | Allow RM to always cancel tokens after app completes | Major | yarn | Jim Brennan | Jim Brennan | +| [MAPREDUCE-7284](https://issues.apache.org/jira/browse/MAPREDUCE-7284) | TestCombineFileInputFormat#testMissingBlocks fails | Major | test | Akira Ajisaka | Akira Ajisaka | +| [HDFS-14498](https://issues.apache.org/jira/browse/HDFS-14498) | LeaseManager can loop forever on the file for which create has failed | Major | namenode | Sergey Shelukhin | Stephen O'Donnell | +| [HADOOP-17130](https://issues.apache.org/jira/browse/HADOOP-17130) | Configuration.getValByRegex() shouldn't update the results while fetching. | Major | common | Mukund Thakur | Mukund Thakur | +| [HADOOP-17119](https://issues.apache.org/jira/browse/HADOOP-17119) | Jetty upgrade to 9.4.x causes MR app fail with IOException | Major | . | Bilwa S T | Bilwa S T | +| [YARN-4771](https://issues.apache.org/jira/browse/YARN-4771) | Some containers can be skipped during log aggregation after NM restart | Major | nodemanager | Jason Darrell Lowe | Jim Brennan | +| [MAPREDUCE-7051](https://issues.apache.org/jira/browse/MAPREDUCE-7051) | Fix typo in MultipleOutputFormat | Trivial | . 
| ywheel | ywheel | +| [HDFS-15313](https://issues.apache.org/jira/browse/HDFS-15313) | Ensure inodes in active filesystem are not deleted during snapshot delete | Major | snapshots | Shashikant Banerjee | Shashikant Banerjee | +| [HDFS-14950](https://issues.apache.org/jira/browse/HDFS-14950) | missing libhdfspp libs in dist-package | Major | build, libhdfs++ | Yuan Zhou | Yuan Zhou | +| [HADOOP-17184](https://issues.apache.org/jira/browse/HADOOP-17184) | Add --mvn-custom-repos parameter to yetus calls | Major | build | Mingliang Liu | Mingliang Liu | +| [HDFS-15499](https://issues.apache.org/jira/browse/HDFS-15499) | Clean up httpfs/pom.xml to remove aws-java-sdk-s3 exclusion | Major | httpfs | Mingliang Liu | Mingliang Liu | +| [HADOOP-17164](https://issues.apache.org/jira/browse/HADOOP-17164) | UGI loginUserFromKeytab doesn't set the last login time | Major | security | Sandeep Guggilam | Sandeep Guggilam | +| [YARN-4575](https://issues.apache.org/jira/browse/YARN-4575) | ApplicationResourceUsageReport should return ALL reserved resource | Major | . | Bibin Chundatt | Bibin Chundatt | +| [HADOOP-17196](https://issues.apache.org/jira/browse/HADOOP-17196) | Fix C/C++ standard warnings | Major | build | Gautham Banasandra | Gautham Banasandra | +| [HADOOP-17204](https://issues.apache.org/jira/browse/HADOOP-17204) | Fix typo in Hadoop KMS document | Trivial | documentation, kms | Akira Ajisaka | Xieming Li | +| [HADOOP-17209](https://issues.apache.org/jira/browse/HADOOP-17209) | Erasure Coding: Native library memory leak | Major | native | Sean Chow | Sean Chow | +| [HADOOP-16925](https://issues.apache.org/jira/browse/HADOOP-16925) | MetricsConfig incorrectly loads the configuration whose value is String list in the properties file | Major | metrics | Jiayi Liu | Jiayi Liu | +| [HDFS-14852](https://issues.apache.org/jira/browse/HDFS-14852) | Removing from LowRedundancyBlocks does not remove the block from all queues | Major | namenode | Hui Fei | Hui Fei | +| [HDFS-15290](https://issues.apache.org/jira/browse/HDFS-15290) | NPE in HttpServer during NameNode startup | Major | namenode | Konstantin Shvachko | Simbarashe Dzinamarira | +| [YARN-10430](https://issues.apache.org/jira/browse/YARN-10430) | Log improvements in NodeStatusUpdaterImpl | Minor | nodemanager | Bilwa S T | Bilwa S T | +| [MAPREDUCE-7294](https://issues.apache.org/jira/browse/MAPREDUCE-7294) | Only application master should upload resource to Yarn Shared Cache | Major | mrv2 | zhenzhao wang | zhenzhao wang | +| [MAPREDUCE-7289](https://issues.apache.org/jira/browse/MAPREDUCE-7289) | Fix wrong comment in LongLong.java | Trivial | documentation, examples | Akira Ajisaka | Wanqiang Ji | +| [YARN-9809](https://issues.apache.org/jira/browse/YARN-9809) | NMs should supply a health status when registering with RM | Major | . | Eric Badger | Eric Badger | +| [YARN-10393](https://issues.apache.org/jira/browse/YARN-10393) | MR job live lock caused by completed state container leak in heartbeat between node manager and RM | Major | nodemanager, yarn | zhenzhao wang | Jim Brennan | +| [YARN-10455](https://issues.apache.org/jira/browse/YARN-10455) | TestNMProxy.testNMProxyRPCRetry is not consistent | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17223](https://issues.apache.org/jira/browse/HADOOP-17223) | update org.apache.httpcomponents:httpclient to 4.5.13 and httpcore to 4.4.13 | Blocker | . 
| Pranav Bheda | Pranav Bheda | +| [HDFS-15628](https://issues.apache.org/jira/browse/HDFS-15628) | HttpFS server throws NPE if a file is a symlink | Major | fs, httpfs | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15627](https://issues.apache.org/jira/browse/HDFS-15627) | Audit log deletes before collecting blocks | Major | logging, namenode | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17309](https://issues.apache.org/jira/browse/HADOOP-17309) | Javadoc warnings and errors are ignored in the precommit jobs | Major | build, documentation | Akira Ajisaka | Akira Ajisaka | +| [HDFS-15639](https://issues.apache.org/jira/browse/HDFS-15639) | [JDK 11] Fix Javadoc errors in hadoop-hdfs-client | Major | . | Takanobu Asanuma | Takanobu Asanuma | +| [HDFS-15618](https://issues.apache.org/jira/browse/HDFS-15618) | Improve datanode shutdown latency | Major | datanode | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15622](https://issues.apache.org/jira/browse/HDFS-15622) | Deleted blocks linger in the replications queue | Major | hdfs | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15641](https://issues.apache.org/jira/browse/HDFS-15641) | DataNode could meet deadlock if invoke refreshNameNode | Critical | . | Hongbing Wang | Hongbing Wang | +| [HDFS-15644](https://issues.apache.org/jira/browse/HDFS-15644) | Failed volumes can cause DNs to stop block reporting | Major | block placement, datanode | Ahmed Hussein | Ahmed Hussein | +| [YARN-10467](https://issues.apache.org/jira/browse/YARN-10467) | ContainerIdPBImpl objects can be leaked in RMNodeImpl.completedContainers | Major | resourcemanager | Haibo Chen | Haibo Chen | +| [HADOOP-17329](https://issues.apache.org/jira/browse/HADOOP-17329) | mvn site commands fails due to MetricsSystemImpl changes | Major | . | Xiaoqiao He | Xiaoqiao He | +| [YARN-10472](https://issues.apache.org/jira/browse/YARN-10472) | Backport YARN-10314 to branch-3.2 | Blocker | yarn | Siyao Meng | Siyao Meng | +| [HADOOP-17340](https://issues.apache.org/jira/browse/HADOOP-17340) | TestLdapGroupsMapping failing -string mismatch in exception validation | Major | test | Steve Loughran | Steve Loughran | +| [HADOOP-17352](https://issues.apache.org/jira/browse/HADOOP-17352) | Update PATCH\_NAMING\_RULE in the personality file | Minor | build | Akira Ajisaka | Akira Ajisaka | +| [HADOOP-17096](https://issues.apache.org/jira/browse/HADOOP-17096) | ZStandardCompressor throws java.lang.InternalError: Error (generic) | Major | io | Stephen Jung (Stripe) | Stephen Jung (Stripe) | +| [HADOOP-17358](https://issues.apache.org/jira/browse/HADOOP-17358) | Improve excessive reloading of Configurations | Major | conf | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15538](https://issues.apache.org/jira/browse/HDFS-15538) | Fix the documentation for dfs.namenode.replication.max-streams in hdfs-default.xml | Major | . | Xieming Li | Xieming Li | +| [HADOOP-17362](https://issues.apache.org/jira/browse/HADOOP-17362) | Doing hadoop ls on Har file triggers too many RPC calls | Major | fs | Ahmed Hussein | Ahmed Hussein | +| [YARN-10485](https://issues.apache.org/jira/browse/YARN-10485) | TimelineConnector swallows InterruptedException | Major | . 
| Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17360](https://issues.apache.org/jira/browse/HADOOP-17360) | Log the remote address for authentication success | Minor | ipc | Ahmed Hussein | Ahmed Hussein | +| [YARN-10396](https://issues.apache.org/jira/browse/YARN-10396) | Max applications calculation per queue disregards queue level settings in absolute mode | Major | capacity scheduler | Benjamin Teke | Benjamin Teke | +| [HADOOP-17346](https://issues.apache.org/jira/browse/HADOOP-17346) | Fair call queue is defeated by abusive service principals | Major | common, ipc | Ahmed Hussein | Ahmed Hussein | +| [YARN-10470](https://issues.apache.org/jira/browse/YARN-10470) | When building new web ui with root user, the bower install should support it. | Major | build, yarn-ui-v2 | zhuqi | zhuqi | +| [HADOOP-16080](https://issues.apache.org/jira/browse/HADOOP-16080) | hadoop-aws does not work with hadoop-client-api | Major | fs/s3 | Keith Turner | Chao Sun | +| [HDFS-15707](https://issues.apache.org/jira/browse/HDFS-15707) | NNTop counts don't add up as expected | Major | hdfs, metrics, namenode | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15709](https://issues.apache.org/jira/browse/HDFS-15709) | EC: Socket file descriptor leak in StripedBlockChecksumReconstructor | Major | datanode, ec, erasure-coding | Yushi Hayasaka | Yushi Hayasaka | +| [HDFS-15240](https://issues.apache.org/jira/browse/HDFS-15240) | Erasure Coding: dirty buffer causes reconstruction block error | Blocker | datanode, erasure-coding | HuangTao | HuangTao | + + +### TESTS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [YARN-10072](https://issues.apache.org/jira/browse/YARN-10072) | TestCSAllocateCustomResource failures | Major | yarn | Jim Brennan | Jim Brennan | +| [YARN-10161](https://issues.apache.org/jira/browse/YARN-10161) | TestRouterWebServicesREST is corrupting STDOUT | Minor | yarn | Jim Brennan | Jim Brennan | +| [HADOOP-14206](https://issues.apache.org/jira/browse/HADOOP-14206) | TestSFTPFileSystem#testFileExists failure: Invalid encoding for signature | Major | fs, test | John Zhuge | Jim Brennan | +| [MAPREDUCE-7288](https://issues.apache.org/jira/browse/MAPREDUCE-7288) | Fix TestLongLong#testRightShift | Minor | . | Wanqiang Ji | Wanqiang Ji | +| [HDFS-15514](https://issues.apache.org/jira/browse/HDFS-15514) | Remove useless dfs.webhdfs.enabled | Minor | test | Hui Fei | Hui Fei | +| [HADOOP-17205](https://issues.apache.org/jira/browse/HADOOP-17205) | Move personality file from Yetus to Hadoop repository | Major | test, yetus | Chao Sun | Chao Sun | + + +### SUB-TASKS: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HADOOP-15775](https://issues.apache.org/jira/browse/HADOOP-15775) | [JDK9] Add missing javax.activation-api dependency | Critical | test | Akira Ajisaka | Akira Ajisaka | +| [HDFS-14096](https://issues.apache.org/jira/browse/HDFS-14096) | [SPS] : Add Support for Storage Policy Satisfier in ViewFs | Major | federation | Ayush Saxena | Ayush Saxena | +| [HADOOP-15787](https://issues.apache.org/jira/browse/HADOOP-15787) | [JDK11] TestIPC.testRTEDuringConnectionSetup fails | Major | . 
| Akira Ajisaka | Zsolt Venczel | +| [HDFS-14262](https://issues.apache.org/jira/browse/HDFS-14262) | [SBN read] Unclear Log.WARN message in GlobalStateIdContext | Major | hdfs | Shweta | Shweta | +| [YARN-7243](https://issues.apache.org/jira/browse/YARN-7243) | Moving logging APIs over to slf4j in hadoop-yarn-server-resourcemanager | Major | . | Yeliang Cang | Prabhu Joseph | +| [HDFS-13404](https://issues.apache.org/jira/browse/HDFS-13404) | RBF: TestRouterWebHDFSContractAppend.testRenameFileBeingAppended fails | Major | test | Takanobu Asanuma | Takanobu Asanuma | +| [HADOOP-16117](https://issues.apache.org/jira/browse/HADOOP-16117) | Update AWS SDK to 1.11.563 | Major | build, fs/s3 | Steve Loughran | Steve Loughran | +| [HDFS-14590](https://issues.apache.org/jira/browse/HDFS-14590) | [SBN Read] Add the document link to the top page | Major | documentation | Takanobu Asanuma | Takanobu Asanuma | +| [YARN-9791](https://issues.apache.org/jira/browse/YARN-9791) | Queue Mutation API does not allow to remove a config | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [HDFS-14822](https://issues.apache.org/jira/browse/HDFS-14822) | [SBN read] Revisit GlobalStateIdContext locking when getting server state id | Major | hdfs | Chen Liang | Chen Liang | +| [HDFS-14785](https://issues.apache.org/jira/browse/HDFS-14785) | [SBN read] Change client logging to be less aggressive | Major | hdfs | Chen Liang | Chen Liang | +| [YARN-9864](https://issues.apache.org/jira/browse/YARN-9864) | Format CS Configuration present in Configuration Store | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [YARN-9801](https://issues.apache.org/jira/browse/YARN-9801) | SchedConfCli does not work with https mode | Major | . | Prabhu Joseph | Prabhu Joseph | +| [HDFS-14858](https://issues.apache.org/jira/browse/HDFS-14858) | [SBN read] Allow configurably enable/disable AlignmentContext on NameNode | Major | hdfs | Chen Liang | Chen Liang | +| [HDFS-12979](https://issues.apache.org/jira/browse/HDFS-12979) | StandbyNode should upload FsImage to ObserverNode after checkpointing. | Major | hdfs | Konstantin Shvachko | Chen Liang | +| [YARN-9873](https://issues.apache.org/jira/browse/YARN-9873) | Mutation API Config Change need to update Version Number | Major | . | Prabhu Joseph | Prabhu Joseph | +| [HDFS-14162](https://issues.apache.org/jira/browse/HDFS-14162) | Balancer should work with ObserverNode | Major | . | Konstantin Shvachko | Erik Krogen | +| [YARN-9773](https://issues.apache.org/jira/browse/YARN-9773) | Add QueueMetrics for Custom Resources | Major | . 
| Manikandan R | Manikandan R | +| [HADOOP-16598](https://issues.apache.org/jira/browse/HADOOP-16598) | Backport "HADOOP-16558 [COMMON+HDFS] use protobuf-maven-plugin to generate protobuf classes" to all active branches | Major | common | Duo Zhang | Duo Zhang | +| [YARN-9950](https://issues.apache.org/jira/browse/YARN-9950) | Unset Ordering Policy of Leaf/Parent queue converted from Parent/Leaf queue respectively | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [YARN-9900](https://issues.apache.org/jira/browse/YARN-9900) | Revert to previous state when Invalid Config is applied and Refresh Support in SchedulerConfig Format | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [HADOOP-16610](https://issues.apache.org/jira/browse/HADOOP-16610) | Upgrade to yetus 0.11.1 and use emoji vote on github pre commit | Major | build | Duo Zhang | Duo Zhang | +| [YARN-9909](https://issues.apache.org/jira/browse/YARN-9909) | Offline format of YarnConfigurationStore | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [YARN-9836](https://issues.apache.org/jira/browse/YARN-9836) | General usability improvements in showSimulationTrace.html | Minor | scheduler-load-simulator | Adam Antal | Adam Antal | +| [HADOOP-16612](https://issues.apache.org/jira/browse/HADOOP-16612) | Track Azure Blob File System client-perceived latency | Major | fs/azure, hdfs-client | Jeetesh Mangwani | Jeetesh Mangwani | +| [HADOOP-16758](https://issues.apache.org/jira/browse/HADOOP-16758) | Refine testing.md to tell user better how to use auth-keys.xml | Minor | fs/s3 | Mingliang Liu | Mingliang Liu | +| [HADOOP-16609](https://issues.apache.org/jira/browse/HADOOP-16609) | Add Jenkinsfile for all active branches | Major | build | Duo Zhang | Akira Ajisaka | +| [HADOOP-16785](https://issues.apache.org/jira/browse/HADOOP-16785) | Improve wasb and abfs resilience on double close() calls | Major | fs/azure | Steve Loughran | Steve Loughran | +| [YARN-10026](https://issues.apache.org/jira/browse/YARN-10026) | Pull out common code pieces from ATS v1.5 and v2 | Major | ATSv2, yarn | Adam Antal | Adam Antal | +| [YARN-10028](https://issues.apache.org/jira/browse/YARN-10028) | Integrate the new abstract log servlet to the JobHistory server | Major | yarn | Adam Antal | Adam Antal | +| [YARN-10083](https://issues.apache.org/jira/browse/YARN-10083) | Provide utility to ask whether an application is in final status | Minor | . | Adam Antal | Adam Antal | +| [YARN-10109](https://issues.apache.org/jira/browse/YARN-10109) | Allow stop and convert from leaf to parent queue in a single Mutation API call | Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph | +| [YARN-10101](https://issues.apache.org/jira/browse/YARN-10101) | Support listing of aggregated logs for containers belonging to an application attempt | Major | log-aggregation, yarn | Adam Antal | Adam Antal | +| [YARN-10022](https://issues.apache.org/jira/browse/YARN-10022) | Create RM Rest API to validate a CapacityScheduler Configuration | Major | . | Kinga Marton | Kinga Marton | +| [HDFS-15173](https://issues.apache.org/jira/browse/HDFS-15173) | RBF: Delete repeated configuration 'dfs.federation.router.metrics.enable' | Minor | documentation, rbf | panlijie | panlijie | +| [YARN-10139](https://issues.apache.org/jira/browse/YARN-10139) | ValidateAndGetSchedulerConfiguration API fails when cluster max allocation \> default 8GB | Major | . 
| Prabhu Joseph | Prabhu Joseph | +| [HDFS-14731](https://issues.apache.org/jira/browse/HDFS-14731) | [FGL] Remove redundant locking on NameNode. | Major | namenode | Konstantin Shvachko | Konstantin Shvachko | +| [YARN-10194](https://issues.apache.org/jira/browse/YARN-10194) | YARN RMWebServices /scheduler-conf/validate leaks ZK Connections | Blocker | capacityscheduler | Akhil PB | Prabhu Joseph | +| [HDFS-14353](https://issues.apache.org/jira/browse/HDFS-14353) | Erasure Coding: metrics xmitsInProgress become to negative. | Major | datanode, erasure-coding | Baolong Mao | Baolong Mao | +| [HDFS-15305](https://issues.apache.org/jira/browse/HDFS-15305) | Extend ViewFS and provide ViewFSOverloadScheme implementation with scheme configurable. | Major | fs, hadoop-client, hdfs-client, viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15306](https://issues.apache.org/jira/browse/HDFS-15306) | Make mount-table to read from central place ( Let's say from HDFS) | Major | configuration, hadoop-client | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-16756](https://issues.apache.org/jira/browse/HADOOP-16756) | distcp -update to S3A; abfs, etc always overwrites due to block size mismatch | Major | fs/s3, tools/distcp | Daisuke Kobayashi | Steve Loughran | +| [HDFS-15322](https://issues.apache.org/jira/browse/HDFS-15322) | Make NflyFS to work when ViewFsOverloadScheme's scheme and target uris schemes are same. | Major | fs, nflyFs, viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15321](https://issues.apache.org/jira/browse/HDFS-15321) | Make DFSAdmin tool to work with ViewFSOverloadScheme | Major | dfsadmin, fs, viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15330](https://issues.apache.org/jira/browse/HDFS-15330) | Document the ViewFSOverloadScheme details in ViewFS guide | Major | viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15389](https://issues.apache.org/jira/browse/HDFS-15389) | DFSAdmin should close filesystem and dfsadmin -setBalancerBandwidth should work with ViewFSOverloadScheme | Major | dfsadmin, viewfsOverloadScheme | Ayush Saxena | Ayush Saxena | +| [HDFS-15394](https://issues.apache.org/jira/browse/HDFS-15394) | Add all available fs.viewfs.overload.scheme.target.\.impl classes in core-default.xml bydefault. | Major | configuration, viewfs, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15387](https://issues.apache.org/jira/browse/HDFS-15387) | FSUsage$DF should consider ViewFSOverloadScheme in processPath | Minor | viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15418](https://issues.apache.org/jira/browse/HDFS-15418) | ViewFileSystemOverloadScheme should represent mount links as non symlinks | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15427](https://issues.apache.org/jira/browse/HDFS-15427) | Merged ListStatus with Fallback target filesystem and InternalDirViewFS. | Major | viewfs | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15429](https://issues.apache.org/jira/browse/HDFS-15429) | mkdirs should work when parent dir is internalDir and fallback configured. | Major | . 
| Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15436](https://issues.apache.org/jira/browse/HDFS-15436) | Default mount table name used by ViewFileSystem should be configurable | Major | viewfs, viewfsOverloadScheme | Virajith Jalaparti | Virajith Jalaparti | +| [HDFS-15450](https://issues.apache.org/jira/browse/HDFS-15450) | Fix NN trash emptier to work if ViewFSOveroadScheme enabled | Major | namenode, viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HDFS-15462](https://issues.apache.org/jira/browse/HDFS-15462) | Add fs.viewfs.overload.scheme.target.ofs.impl to core-default.xml | Major | configuration, viewfs, viewfsOverloadScheme | Siyao Meng | Siyao Meng | +| [HDFS-15464](https://issues.apache.org/jira/browse/HDFS-15464) | ViewFsOverloadScheme should work when -fs option pointing to remote cluster without mount links | Major | viewfsOverloadScheme | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17101](https://issues.apache.org/jira/browse/HADOOP-17101) | Replace Guava Function with Java8+ Function | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HADOOP-17099](https://issues.apache.org/jira/browse/HADOOP-17099) | Replace Guava Predicate with Java8+ Predicate | Minor | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15478](https://issues.apache.org/jira/browse/HDFS-15478) | When Empty mount points, we are assigning fallback link to self. But it should not use full URI for target fs. | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17100](https://issues.apache.org/jira/browse/HADOOP-17100) | Replace Guava Supplier with Java8+ Supplier in Hadoop | Major | . | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15515](https://issues.apache.org/jira/browse/HDFS-15515) | mkdirs on fallback should throw IOE out instead of suppressing and returning false | Major | . | Uma Maheswara Rao G | Uma Maheswara Rao G | +| [HADOOP-17199](https://issues.apache.org/jira/browse/HADOOP-17199) | Backport HADOOP-13230 list/getFileStatus changes for preserved directory markers | Major | fs/s3 | Steve Loughran | Steve Loughran | +| [HDFS-8631](https://issues.apache.org/jira/browse/HDFS-8631) | WebHDFS : Support setQuota | Major | . | nijel | Chao Sun | +| [YARN-10332](https://issues.apache.org/jira/browse/YARN-10332) | RESOURCE\_UPDATE event was repeatedly registered in DECOMMISSIONING state | Minor | resourcemanager | yehuanhuan | yehuanhuan | +| [HDFS-15459](https://issues.apache.org/jira/browse/HDFS-15459) | TestBlockTokenWithDFSStriped fails intermittently | Major | hdfs | Ahmed Hussein | Ahmed Hussein | +| [HDFS-15461](https://issues.apache.org/jira/browse/HDFS-15461) | TestDFSClientRetries#testGetFileChecksum fails intermittently | Major | dfsclient, test | Ahmed Hussein | Ahmed Hussein | +| [HDFS-9776](https://issues.apache.org/jira/browse/HDFS-9776) | TestHAAppend#testMultipleAppendsDuringCatchupTailing is flaky | Major | . | Vinayakumar B | Ahmed Hussein | +| [HADOOP-17330](https://issues.apache.org/jira/browse/HADOOP-17330) | Backport HADOOP-16005-"NativeAzureFileSystem does not support setXAttr" to branch-3.2 | Major | fs/azure | Sally Zuo | Sally Zuo | +| [HDFS-15643](https://issues.apache.org/jira/browse/HDFS-15643) | EC: Fix checksum computation in case of native encoders | Blocker | . 
| Ahmed Hussein | Ayush Saxena | +| [HADOOP-17343](https://issues.apache.org/jira/browse/HADOOP-17343) | Upgrade aws-java-sdk to 1.11.901 | Minor | build, fs/s3 | Dongjoon Hyun | Steve Loughran | +| [HADOOP-17325](https://issues.apache.org/jira/browse/HADOOP-17325) | WASB: Test failures | Major | fs/azure, test | Sneha Vijayarajan | Steve Loughran | +| [HDFS-15708](https://issues.apache.org/jira/browse/HDFS-15708) | TestURLConnectionFactory fails by NoClassDefFoundError in branch-3.3 and branch-3.2 | Blocker | test | Akira Ajisaka | Chao Sun | + + +### OTHER: + +| JIRA | Summary | Priority | Component | Reporter | Contributor | +|:---- |:---- | :--- |:---- |:---- |:---- | +| [HDFS-14394](https://issues.apache.org/jira/browse/HDFS-14394) | Add -std=c99 / -std=gnu99 to libhdfs compile flags | Major | hdfs-client, libhdfs, native | Sahil Takiar | Sahil Takiar | +| [HADOOP-16365](https://issues.apache.org/jira/browse/HADOOP-16365) | Upgrade jackson-databind to 2.9.9 | Major | build | Shweta | Shweta | +| [HADOOP-16491](https://issues.apache.org/jira/browse/HADOOP-16491) | Upgrade jetty version to 9.3.27 | Major | . | Hrishikesh Gadre | Hrishikesh Gadre | +| [HADOOP-16542](https://issues.apache.org/jira/browse/HADOOP-16542) | Update commons-beanutils version to 1.9.4 | Major | . | Wei-Chiu Chuang | Kevin Su | +| [YARN-9730](https://issues.apache.org/jira/browse/YARN-9730) | Support forcing configured partitions to be exclusive based on app node label | Major | . | Jonathan Hung | Jonathan Hung | +| [HADOOP-16675](https://issues.apache.org/jira/browse/HADOOP-16675) | Upgrade jackson-databind to 2.9.10.1 | Blocker | security | Wei-Chiu Chuang | Lisheng Sun | +| [HDFS-14959](https://issues.apache.org/jira/browse/HDFS-14959) | [SBNN read] access time should be turned off | Major | documentation | Wei-Chiu Chuang | Chao Sun | +| [HADOOP-16784](https://issues.apache.org/jira/browse/HADOOP-16784) | Update the year to 2020 | Major | . | Ayush Saxena | Ayush Saxena | +| [HADOOP-16803](https://issues.apache.org/jira/browse/HADOOP-16803) | Upgrade jackson-databind to 2.9.10.2 | Blocker | security | Akira Ajisaka | Masatake Iwasaki | +| [HADOOP-16871](https://issues.apache.org/jira/browse/HADOOP-16871) | Upgrade Netty version to 4.1.45.Final to handle CVE-2019-20444,CVE-2019-16869 | Major | . | Aray Chenchu Sukesh | Aray Chenchu Sukesh | +| [HADOOP-16647](https://issues.apache.org/jira/browse/HADOOP-16647) | Support OpenSSL 1.1.1 LTS | Critical | security | Wei-Chiu Chuang | Rakesh Radhakrishnan | +| [HADOOP-16982](https://issues.apache.org/jira/browse/HADOOP-16982) | Update Netty to 4.1.48.Final | Blocker | . | Wei-Chiu Chuang | Lisheng Sun | +| [HADOOP-16990](https://issues.apache.org/jira/browse/HADOOP-16990) | Update Mockserver | Major | . | Wei-Chiu Chuang | Attila Doroszlai | +| [YARN-10540](https://issues.apache.org/jira/browse/YARN-10540) | Node page is broken in YARN UI1 and UI2 including RMWebService api for nodes | Critical | webapp | Sunil G | Jim Brennan | +| [HADOOP-17445](https://issues.apache.org/jira/browse/HADOOP-17445) | Update the year to 2021 | Major | . 
| Xiaoqiao He | Xiaoqiao He | + + diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.2/RELEASENOTES.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.2/RELEASENOTES.md new file mode 100644 index 0000000000000..c4f4aa6c03b3d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.2/RELEASENOTES.md @@ -0,0 +1,86 @@ + + +# Apache Hadoop 3.2.2 Release Notes + +These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements. + + +--- + +* [HADOOP-16460](https://issues.apache.org/jira/browse/HADOOP-16460) | *Major* | **ABFS: fix for Sever Name Indication (SNI)** + +ABFS: Bug fix to support Server Name Indication (SNI). + + +--- + +* [HDFS-14890](https://issues.apache.org/jira/browse/HDFS-14890) | *Blocker* | **Setting permissions on name directory fails on non posix compliant filesystems** + +- Fixed namenode/journal startup on Windows. + + +--- + +* [HDFS-14905](https://issues.apache.org/jira/browse/HDFS-14905) | *Major* | **Backport HDFS persistent memory read cache support to branch-3.2** + +Non-volatile storage class memory (SCM, also known as persistent memory) is supported in HDFS cache. To enable SCM cache, user just needs to configure SCM volume for property “dfs.datanode.cache.pmem.dirs” in hdfs-site.xml. And all HDFS cache directives keep unchanged. There are two implementations for HDFS SCM Cache, one is pure java code implementation and the other is native PMDK based implementation. The latter implementation can bring user better performance gain in cache write and cache read. If PMDK native libs could be loaded, it will use PMDK based implementation otherwise it will fallback to java code implementation. To enable PMDK based implementation, user should install PMDK library by referring to the official site http://pmem.io/. Then, build Hadoop with PMDK support by referring to "PMDK library build options" section in \`BUILDING.txt\` in the source code. If multiple SCM volumes are configured, a round-robin policy is used to select an available volume for caching a block. Consistent with DRAM cache, SCM cache also has no cache eviction mechanism. When DataNode receives a data read request from a client, if the corresponding block is cached into SCM, DataNode will instantiate an InputStream with the block location path on SCM (pure java implementation) or cache address on SCM (PMDK based implementation). Once the InputStream is created, DataNode will send the cached data to the client. Please refer "Centralized Cache Management" guide for more details. + + +--- + +* [HDFS-12943](https://issues.apache.org/jira/browse/HDFS-12943) | *Major* | **Consistent Reads from Standby Node** + +Observer is a new type of a NameNode in addition to Active and Standby Nodes in HA settings. An Observer Node maintains a replica of the namespace same as a Standby Node. It additionally allows execution of clients read requests. + +To ensure read-after-write consistency within a single client, a state ID is introduced in RPC headers. The Observer responds to the client request only after its own state has caught up with the client’s state ID, which it previously received from the Active NameNode. + +Clients can explicitly invoke a new client protocol call msync(), which ensures that subsequent reads by this client from an Observer are consistent. 
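To make the observer-read workflow described in this note concrete, here is a minimal client-side sketch (editorial illustration, not part of this patch). It assumes a nameservice named `mycluster` and a sample path, and it assumes `FileSystem#msync()` is exposed on this branch as the FileSystem-level entry point for the msync() protocol call mentioned above; the configuration key points at the ObserverReadProxyProvider introduced in the next paragraph.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ObserverReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route this client's calls through the observer-aware proxy provider
    // for the (assumed) nameservice "mycluster".
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider");

    try (FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster"), conf)) {
      // Assumed FileSystem-level wrapper for the msync() protocol call:
      // it obtains the latest state ID from the Active NameNode so that
      // subsequent reads served by an Observer are not stale.
      fs.msync();
      fs.listStatus(new Path("/user")); // may now be answered by an Observer
    }
  }
}
```

In this setup writes still go to the Active NameNode; only read requests are candidates for the Observer, and only once the Observer's state has caught up with the client's state ID.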
+ +A new client-side ObserverReadProxyProvider is introduced to provide automatic switching between Active and Observer NameNodes for submitting respectively write and read requests. + + +--- + +* [HADOOP-16771](https://issues.apache.org/jira/browse/HADOOP-16771) | *Major* | **Update checkstyle to 8.26 and maven-checkstyle-plugin to 3.1.0** + +Updated checkstyle to 8.26 and updated maven-checkstyle-plugin to 3.1.0. + + +--- + +* [HDFS-15281](https://issues.apache.org/jira/browse/HDFS-15281) | *Major* | **ZKFC ignores dfs.namenode.rpc-bind-host and uses dfs.namenode.rpc-address to bind to host address** + +ZKFC binds host address to "dfs.namenode.servicerpc-bind-host", if configured. Otherwise, it binds to "dfs.namenode.rpc-bind-host". If neither of those is configured, ZKFC binds itself to NameNode RPC server address (effectively "dfs.namenode.rpc-address"). + + +--- + +* [HADOOP-17024](https://issues.apache.org/jira/browse/HADOOP-17024) | *Major* | **ListStatus on ViewFS root (ls "/") should list the linkFallBack root (configured target root).** + +ViewFS#listStatus on root("/") considers listing from fallbackLink if available. If the same directory name is present in configured mount path as well as in fallback link, then only the configured mount path will be listed in the returned result. + + +--- + +* [YARN-9809](https://issues.apache.org/jira/browse/YARN-9809) | *Major* | **NMs should supply a health status when registering with RM** + +Improved node registration with node health status. + + + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 1ce23a0eb81f2..dd9f41a7a3527 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -124,6 +124,25 @@ public void initializeMemberVariables() { xmlPrefixToSkipCompare.add("fs.adl."); xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl"); + // ViewfsOverloadScheme target fs impl property keys are dynamically + // constructed and they are advanced props. + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfss.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.file.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ftp.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.hdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.http.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.https.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ofs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.o3fs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.oss.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl"); + xmlPropsToSkipCompare. 
+ add("fs.viewfs.overload.scheme.target.swebhdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.webhdfs.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.wasb.impl"); + xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.swift.impl"); + // Azure properties are in a different class // - org.apache.hadoop.fs.azure.AzureNativeFileSystemStore // - org.apache.hadoop.fs.azure.SASKeyGeneratorImpl diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java index cf42219f6d8b5..eae9a1fc744c0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java @@ -38,7 +38,7 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.http.HttpServer2; import org.junit.BeforeClass; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 81c53959478b4..a2273ef34faba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -1062,6 +1062,38 @@ public void testRelativeIncludes() throws Exception { new File(new File(relConfig).getParent()).delete(); } + @Test + public void testRelativeIncludesWithLoadingViaUri() throws Exception { + tearDown(); + File configFile = new File("./tmp/test-config.xml"); + File configFile2 = new File("./tmp/test-config2.xml"); + + new File(configFile.getParent()).mkdirs(); + out = new BufferedWriter(new FileWriter(configFile2)); + startConfig(); + appendProperty("a", "b"); + endConfig(); + + out = new BufferedWriter(new FileWriter(configFile)); + startConfig(); + // Add the relative path instead of the absolute one. 
+ startInclude(configFile2.getName()); + endInclude(); + appendProperty("c", "d"); + endConfig(); + + // verify that the includes file contains all properties + Path fileResource = new Path(configFile.toURI()); + conf.addResource(fileResource); + assertEquals("b", conf.get("a")); + assertEquals("d", conf.get("c")); + + // Cleanup + configFile.delete(); + configFile2.delete(); + new File(configFile.getParent()).delete(); + } + @Test public void testIntegerRanges() { Configuration conf = new Configuration(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java index efb813164af69..2c0d6025f2688 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java @@ -47,8 +47,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; public class TestConfigurationDeprecation { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java index e15e699534d31..51d23d8038b0b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java @@ -53,8 +53,9 @@ public void testReloadNotQuiet() throws Throwable { SubConf conf = new SubConf(true); conf.setQuietMode(false); assertFalse(conf.isReloaded()); + // adding a resource does not force a reload. 
conf.addResource("not-a-valid-resource"); - assertTrue(conf.isReloaded()); + assertFalse(conf.isReloaded()); try { Properties properties = conf.getProperties(); fail("Should not have got here"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index 39d3bae655d85..4948df9b1f4cb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -18,8 +18,8 @@ package org.apache.hadoop.conf; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java index eca23a7f2a372..ce4e7ffeb519a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java @@ -43,7 +43,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java index 55a9280d6260a..9b8638faa4b22 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java @@ -32,7 +32,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestValueQueue { Logger LOG = LoggerFactory.getLogger(TestValueQueue.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java index 7804c73fd2afc..616c66b0748db 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java @@ -62,7 +62,7 @@ import org.junit.rules.Timeout; import org.mockito.Mockito; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestLoadBalancingKMSClientProvider { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java index 73fd2802ab1d1..2aa5407c056af 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java @@ -37,8 +37,8 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.util.concurrent.Uninterruptibles; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java index a4ccee3f7f58e..8065b3f61f52c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java @@ -862,7 +862,8 @@ private void assertListFilesFinds(Path dir, Path subdir) throws IOException { found); } - private void assertListStatusFinds(Path dir, Path subdir) throws IOException { + protected void assertListStatusFinds(Path dir, Path subdir) + throws IOException { FileStatus[] stats = fs.listStatus(dir); boolean found = false; StringBuilder builder = new StringBuilder(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index b3c38475d435b..67a933bb9e39c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -21,22 +21,31 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.test.HadoopTestBase; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; +import org.assertj.core.api.Assertions; import org.junit.Test; -import java.security.PrivilegedExceptionAction; -import java.util.concurrent.Semaphore; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_CREATION_PARALLEL_COUNT; import static org.apache.hadoop.test.LambdaTestUtils.intercept; -import static org.junit.Assert.*; import static org.mockito.Mockito.*; -public class TestFileSystemCaching { +public class TestFileSystemCaching extends HadoopTestBase { @Test public void testCacheEnabled() throws Exception { @@ -336,4 +345,134 @@ public void testCacheIncludesURIUserInfo() throws Throwable { assertNotEquals(keyA, new FileSystem.Cache.Key( new URI("wasb://a:password@account.blob.core.windows.net"), conf)); } + + + /** + * Single semaphore: no surplus FS instances will 
be created + * and then discarded. + */ + @Test + public void testCacheSingleSemaphoredConstruction() throws Exception { + FileSystem.Cache cache = semaphoredCache(1); + createFileSystems(cache, 10); + Assertions.assertThat(cache.getDiscardedInstances()) + .describedAs("Discarded FS instances") + .isEqualTo(0); + } + + /** + * Dual semaphore: thread 2 will get as far as + * blocking in the initialize() method while awaiting + * thread 1 to complete its initialization. + *
    + * The thread 2 FS instance will be discarded. + * All other threads will block for a cache semaphore, + * so when they are given an opportunity to proceed, + * they will find that an FS instance exists. + */ + @Test + public void testCacheDualSemaphoreConstruction() throws Exception { + FileSystem.Cache cache = semaphoredCache(2); + createFileSystems(cache, 10); + Assertions.assertThat(cache.getDiscardedInstances()) + .describedAs("Discarded FS instances") + .isEqualTo(1); + } + + /** + * Construct the FS instances in a cache with effectively no + * limit on the number of instances which can be created + * simultaneously. + *
    + * This is the effective state before HADOOP-17313. + *
    + * All but one thread's FS instance will be discarded. + */ + @Test + public void testCacheLargeSemaphoreConstruction() throws Exception { + FileSystem.Cache cache = semaphoredCache(999); + int count = 10; + createFileSystems(cache, count); + Assertions.assertThat(cache.getDiscardedInstances()) + .describedAs("Discarded FS instances") + .isEqualTo(count -1); + } + + /** + * Create a cache with a given semaphore size. + * @param semaphores number of semaphores + * @return the cache. + */ + private FileSystem.Cache semaphoredCache(final int semaphores) { + final Configuration conf1 = new Configuration(); + conf1.setInt(FS_CREATION_PARALLEL_COUNT, semaphores); + FileSystem.Cache cache = new FileSystem.Cache(conf1); + return cache; + } + + /** + * Attempt to create {@code count} filesystems in parallel, + * then assert that they are all equal. + * @param cache cache to use + * @param count count of filesystems to instantiate + */ + private void createFileSystems(final FileSystem.Cache cache, final int count) + throws URISyntaxException, InterruptedException, + java.util.concurrent.ExecutionException { + final Configuration conf = new Configuration(); + conf.set("fs.blocking.impl", BlockingInitializer.NAME); + // only one instance can be created at a time. + URI uri = new URI("blocking://a"); + ListeningExecutorService pool = + MoreExecutors.listeningDecorator( + BlockingThreadPoolExecutorService.newInstance(count * 2, 0, + 10, TimeUnit.SECONDS, + "creation-threads")); + + // submit a set of requests to create an FS instance. + // the semaphore will block all but one, and that will block until + // it is allowed to continue + List> futures = new ArrayList<>(count); + + // acquire the semaphore so blocking all FS instances from + // being initialized. + Semaphore semaphore = BlockingInitializer.SEM; + semaphore.acquire(); + + for (int i = 0; i < count; i++) { + futures.add(pool.submit( + () -> cache.get(uri, conf))); + } + // now let all blocked initializers free + semaphore.release(); + // get that first FS + FileSystem createdFS = futures.get(0).get(); + // verify all the others are the same instance + for (int i = 1; i < count; i++) { + FileSystem fs = futures.get(i).get(); + Assertions.assertThat(fs) + .isSameAs(createdFS); + } + } + + /** + * An FS which blocks in initialize() until it can acquire the shared + * semaphore (which it then releases). 
+ */ + private static final class BlockingInitializer extends LocalFileSystem { + + private static final String NAME = BlockingInitializer.class.getName(); + + private static final Semaphore SEM = new Semaphore(1); + + @Override + public void initialize(URI uri, Configuration conf) throws IOException { + try { + SEM.acquire(); + SEM.release(); + } catch (InterruptedException e) { + throw new IOException(e.toString(), e); + } + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java index f0057a6c6d902..6cd450610b390 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java @@ -137,6 +137,12 @@ public Token[] addDelegationTokens(String renewer, Credentials creds) void setQuota(Path f, long namespaceQuota, long storagespaceQuota); void setQuotaByStorageType(Path f, StorageType type, long quota); StorageStatistics getStorageStatistics(); + + /* + Not passed through as the inner implementation will miss features + of the filter such as checksums. + */ + MultipartUploaderBuilder createMultipartUploader(Path basePath); } @Test @@ -278,6 +284,23 @@ public void testRenameOptions() throws Exception { verify(mockFs).rename(eq(src), eq(dst), eq(opt)); } + /** + * Verify that filterFS always returns false, even if local/rawlocal + * ever implement multipart uploads. + */ + @Test + public void testFilterPathCapabilites() throws Exception { + try (FilterFileSystem flfs = new FilterLocalFileSystem()) { + flfs.initialize(URI.create("filter:/"), conf); + Path src = new Path("/src"); + assertFalse( + "hasPathCapability(FS_MULTIPART_UPLOADER) should have failed for " + + flfs, + flfs.hasPathCapability(src, + CommonPathCapabilities.FS_MULTIPART_UPLOADER)); + } + } + private void checkInit(FilterFileSystem fs, boolean expectInit) throws Exception { URI uri = URI.create("filter:/"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java index 72ae296c957b5..e3c4ee05c8c8f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java @@ -23,6 +23,7 @@ import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -34,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; import org.junit.Before; import org.junit.BeforeClass; @@ -175,7 +177,20 @@ public void testCopyDirFromWindowsLocalPath() throws Exception { checkPut(dirPath, targetDir, true); } - + @Test + public void testCopyBetweenFsEqualPath() throws Exception { + Path testRoot = new Path(testRootDir, "testPutFile"); + lfs.delete(testRoot, true); + lfs.mkdirs(testRoot); + + Path filePath = new Path(testRoot, "sameSourceTarget"); + lfs.create(filePath).close(); + final 
FileStatus status = lfs.getFileStatus(filePath); + LambdaTestUtils.intercept(PathOperationException.class, () -> + FileUtil.copy(lfs, status, lfs, filePath, false, true, conf) + ); + } + private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath) throws Exception { lfs.delete(targetDir, true); @@ -683,4 +698,27 @@ public void testPutSrcFileNoPerm() lfs.setPermission(src, new FsPermission((short)0755)); } } + + @Test + public void testLazyPersistDirectOverwrite() throws Exception { + Path testRoot = new Path(testRootDir, "testLazyPersistDirectOverwrite"); + try { + lfs.delete(testRoot, true); + lfs.mkdirs(testRoot); + Path filePath = new Path(testRoot, new Path("srcFile")); + lfs.create(filePath).close(); + // Put with overwrite in direct mode. + String[] argv = + new String[] {"-put", "-f", "-l", "-d", filePath.toString(), + filePath.toString()}; + assertEquals(0, shell.run(argv)); + + // Put without overwrite in direct mode shouldn't be success. + argv = new String[] {"-put", "-l", "-d", filePath.toString(), + filePath.toString()}; + assertNotEquals(0, shell.run(argv)); + } finally { + lfs.delete(testRoot, true); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java index 2e7cb5d6342b3..62e7990674d3b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java @@ -101,7 +101,7 @@ public void testTouch() throws Exception { { assertThat( "Expected successful touch on a non-existent file with -c option", - shellRun("-touch", "-c", newFileName), is(not(0))); + shellRun("-touch", "-c", newFileName), is(0)); assertThat(lfs.exists(newFile), is(false)); } @@ -140,7 +140,7 @@ public void testTouch() throws Exception { Date dateObj = parseTimestamp(strTime); assertThat( - "Expected successful touch with a specified modificatiom time", + "Expected successful touch with a specified modification time", shellRun("-touch", "-m", "-t", strTime, newFileName), is(0)); // Verify if modification time is recorded correctly (and access time // remains unchanged). @@ -179,6 +179,16 @@ public void testTouch() throws Exception { assertThat("Expected failed touch with a missing timestamp", shellRun("-touch", "-t", newFileName), is(not(0))); } + + // Verify -c option when file exists. 
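For context on the assertion that follows: when the target already exists, touch -c -t boils down to a FileSystem.setTimes() call rather than a create. A hedged sketch of that equivalence (not the shell's actual code path), with lfs and newFile being the fixtures from the surrounding test:

    // Sketch: what "-touch -c -t <time>" amounts to for an existing file.
    long when = System.currentTimeMillis();
    if (lfs.exists(newFile)) {             // -c: never create a missing file
      lfs.setTimes(newFile, when, when);   // passing -1 for either argument leaves it unchanged
    }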
+ String strTime = formatTimestamp(System.currentTimeMillis()); + Date dateObj = parseTimestamp(strTime); + assertThat( + "Expected successful touch on a non-existent file with -c option", + shellRun("-touch", "-c", "-t", strTime, newFileName), is(0)); + FileStatus fileStatus = lfs.getFileStatus(newFile); + assertThat(fileStatus.getAccessTime(), is(dateObj.getTime())); + assertThat(fileStatus.getModificationTime(), is(dateObj.getTime())); } private String formatTimestamp(long timeInMillis) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index 2097633839112..711ab94fdf123 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -41,7 +41,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CompletableFuture; import static org.apache.hadoop.fs.Options.ChecksumOpt; @@ -248,6 +247,9 @@ CompletableFuture openFileWithOptions( CompletableFuture openFileWithOptions( Path path, OpenFileParameters parameters) throws IOException; + + MultipartUploaderBuilder createMultipartUploader(Path basePath) + throws IOException; } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java index c58e731b82b21..6415df6310fc2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java @@ -33,7 +33,10 @@ import java.util.HashSet; import java.util.Set; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + /** * This test class checks basic operations with {@link HarFileSystem} including diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 517f6ce016544..1384bb6a17f38 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java index c0d79c145f781..18f88084c3f48 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java @@ -34,7 +34,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; -import com.google.common.base.Joiner; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java index 79222ce67d6cf..85e1f849998c3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java @@ -18,23 +18,31 @@ package org.apache.hadoop.fs.contract; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StreamCapabilities; + import org.junit.Test; import org.junit.AssumptionViolatedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.FileNotFoundException; import java.io.IOException; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertCapabilities; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.getFileStatusEventually; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; /** * Test creating files, overwrite options etc. @@ -42,6 +50,9 @@ public abstract class AbstractContractCreateTest extends AbstractFSContractTestBase { + private static final Logger LOG = + LoggerFactory.getLogger(AbstractContractCreateTest.class); + /** * How long to wait for a path to become visible. */ @@ -436,4 +447,145 @@ private void createFile(Path path) throws IOException { writeDataset(fs, path, data, data.length, 1024 * 1024, true); } + + @Test + public void testSyncable() throws Throwable { + describe("test declared and actual Syncable behaviors"); + FileSystem fs = getFileSystem(); + boolean supportsFlush = isSupported(SUPPORTS_HFLUSH); + boolean supportsSync = isSupported(SUPPORTS_HSYNC); + boolean metadataUpdatedOnHSync = isSupported(METADATA_UPDATED_ON_HSYNC); + + validateSyncableSemantics(fs, + supportsSync, + supportsFlush, + metadataUpdatedOnHSync); + } + + /** + * Validate the semantics of syncable. + * @param fs filesystem + * @param supportsSync sync is present + * @param supportsFlush flush is present. + * @param metadataUpdatedOnHSync Is the metadata updated after an hsync? 
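Outside the test harness, the same capabilities are usually probed on the stream itself before relying on durability; a minimal sketch, assuming fs, path and data are already in scope and using only the HFLUSH/HSYNC capability names exercised here:

    // Sketch: only call hsync() when the stream declares support for it.
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(data);
      if (out.hasCapability(StreamCapabilities.HSYNC)) {
        out.hsync();   // data is durable once this returns
      } else {
        out.hflush();  // may silently downgrade to flush() on some streams
      }
    }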
+ * @throws IOException failure + */ + protected void validateSyncableSemantics(final FileSystem fs, + final boolean supportsSync, + final boolean supportsFlush, + final boolean metadataUpdatedOnHSync) + throws IOException { + Path path = methodPath(); + LOG.info("Expecting files under {} to have supportsSync={}" + + " and supportsFlush={}; metadataUpdatedOnHSync={}", + path, supportsSync, supportsFlush, metadataUpdatedOnHSync); + + try (FSDataOutputStream out = fs.create(path, true)) { + LOG.info("Created output stream {}", out); + + // probe stream for support for flush/sync, whose capabilities + // of supports/does not support must match what is expected + String[] hflushCapabilities = { + StreamCapabilities.HFLUSH + }; + String[] hsyncCapabilities = { + StreamCapabilities.HSYNC + }; + if (supportsFlush) { + assertCapabilities(out, hflushCapabilities, null); + } else { + assertCapabilities(out, null, hflushCapabilities); + } + if (supportsSync) { + assertCapabilities(out, hsyncCapabilities, null); + } else { + assertCapabilities(out, null, hsyncCapabilities); + } + + // write one byte, then hflush it + out.write('a'); + try { + out.hflush(); + if (!supportsFlush) { + // FSDataOutputStream silently downgrades to flush() here. + // This is not good, but if changed some applications + // break writing to some stores. + LOG.warn("FS doesn't support Syncable.hflush()," + + " but doesn't reject it either."); + } + } catch (UnsupportedOperationException e) { + if (supportsFlush) { + throw new AssertionError("hflush not supported", e); + } + } + + // write a second byte, then hsync it. + out.write('b'); + try { + out.hsync(); + } catch (UnsupportedOperationException e) { + if (supportsSync) { + throw new AssertionError("HSync not supported", e); + } + } + + if (supportsSync) { + // if sync really worked, data MUST be visible here + + // first the metadata which MUST be present + final FileStatus st = fs.getFileStatus(path); + if (metadataUpdatedOnHSync) { + // not all stores reliably update it, HDFS/webHDFS in particular + assertEquals("Metadata not updated during write " + st, + 2, st.getLen()); + } + + // there's no way to verify durability, but we can + // at least verify a new file input stream reads + // the data + try (FSDataInputStream in = fs.open(path)) { + assertEquals('a', in.read()); + assertEquals('b', in.read()); + assertEquals(-1, in.read()); + LOG.info("Successfully read synced data on a new reader {}", in); + } + } else { + // no sync. Let's do a flush and see what happens. + out.flush(); + // Now look at the filesystem. + try (FSDataInputStream in = fs.open(path)) { + int c = in.read(); + if (c == -1) { + // nothing was synced; sync and flush really aren't there. + LOG.info("sync and flush are declared unsupported" + + " -flushed changes were not saved"); + + } else { + LOG.info("sync and flush are declared unsupported" + + " - but the stream does offer some sync/flush semantics"); + } + // close outside a finally as we do want to see any exception raised. 
+ in.close(); + + } catch (FileNotFoundException e) { + // that's OK if it's an object store, but not if its a real + // FS + if (!isSupported(IS_BLOBSTORE)) { + throw e; + } else { + LOG.warn( + "Output file was not created; this is an object store with different" + + " visibility semantics"); + } + } + } + // close the output stream + out.close(); + + final String stats = ioStatisticsSourceToString(out); + if (!stats.isEmpty()) { + LOG.info("IOStatistics {}", stats); + } + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java index 328c8e1377904..08df1d4d883a6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java @@ -49,18 +49,17 @@ public void testDeleteNonexistentPathRecursive() throws Throwable { Path path = path("testDeleteNonexistentPathRecursive"); assertPathDoesNotExist("leftover", path); ContractTestUtils.rejectRootOperation(path); - assertFalse("Returned true attempting to delete" + assertFalse("Returned true attempting to recursively delete" + " a nonexistent path " + path, - getFileSystem().delete(path, false)); + getFileSystem().delete(path, true)); } - @Test public void testDeleteNonexistentPathNonRecursive() throws Throwable { Path path = path("testDeleteNonexistentPathNonRecursive"); assertPathDoesNotExist("leftover", path); ContractTestUtils.rejectRootOperation(path); - assertFalse("Returned true attempting to recursively delete" + assertFalse("Returned true attempting to non recursively delete" + " a nonexistent path " + path, getFileSystem().delete(path, false)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index f63314d39292e..c0d9733bbb9a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.UUID; +import org.assertj.core.api.Assertions; + import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; @@ -148,6 +150,7 @@ public void testListLocatedStatusEmptyDirectory() throws IOException { public void testComplexDirActions() throws Throwable { TreeScanResults tree = createTestTree(); checkListStatusStatusComplexDir(tree); + checkListStatusIteratorComplexDir(tree); checkListLocatedStatusStatusComplexDir(tree); checkListFilesComplexDirNonRecursive(tree); checkListFilesComplexDirRecursive(tree); @@ -169,6 +172,34 @@ protected void checkListStatusStatusComplexDir(TreeScanResults tree) listing.assertSizeEquals("listStatus()", TREE_FILES, TREE_WIDTH, 0); } + /** + * Test {@link FileSystem#listStatusIterator(Path)} on a complex + * directory tree. + * @param tree directory tree to list. 
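The contract under test is that the remote iterator behaves like a plain Java iterator: it can be driven with hasNext()/next() or by next() alone until it signals exhaustion. A small consumption sketch, with dirPath standing in for any directory:

    // Sketch: the usual way to consume listStatusIterator().
    RemoteIterator<FileStatus> it = fs.listStatusIterator(dirPath);
    while (it.hasNext()) {
      FileStatus st = it.next();
      System.out.println(st.getPath() + " (" + st.getLen() + " bytes)");
    }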
+ * @throws Throwable + */ + protected void checkListStatusIteratorComplexDir(TreeScanResults tree) + throws Throwable { + describe("Expect listStatusIterator to list all entries in top dir only"); + + FileSystem fs = getFileSystem(); + TreeScanResults listing = new TreeScanResults( + fs.listStatusIterator(tree.getBasePath())); + listing.assertSizeEquals("listStatus()", TREE_FILES, TREE_WIDTH, 0); + + List resWithoutCheckingHasNext = + iteratorToListThroughNextCallsAlone(fs + .listStatusIterator(tree.getBasePath())); + + List resWithCheckingHasNext = iteratorToList(fs + .listStatusIterator(tree.getBasePath())); + Assertions.assertThat(resWithCheckingHasNext) + .describedAs("listStatusIterator() should return correct " + + "results even if hasNext() calls are not made.") + .hasSameElementsAs(resWithoutCheckingHasNext); + + } + /** * Test {@link FileSystem#listLocatedStatus(Path)} on a complex * directory tree. @@ -322,6 +353,45 @@ public void testListStatusFile() throws Throwable { verifyStatusArrayMatchesFile(f, getFileSystem().listStatus(f)); } + @Test + public void testListStatusIteratorFile() throws Throwable { + describe("test the listStatusIterator(path) on a file"); + Path f = touchf("listStItrFile"); + + List statusList = (List) iteratorToList( + getFileSystem().listStatusIterator(f)); + validateListingForFile(f, statusList, false); + + List statusList2 = + (List) iteratorToListThroughNextCallsAlone( + getFileSystem().listStatusIterator(f)); + validateListingForFile(f, statusList2, true); + } + + /** + * Validate listing result for an input path which is file. + * @param f file. + * @param statusList list status of a file. + * @param nextCallAlone whether the listing generated just using + * next() calls. + */ + private void validateListingForFile(Path f, + List statusList, + boolean nextCallAlone) { + String msg = String.format("size of file list returned using %s should " + + "be 1", nextCallAlone ? 
+ "next() calls alone" : "hasNext() and next() calls"); + Assertions.assertThat(statusList) + .describedAs(msg) + .hasSize(1); + Assertions.assertThat(statusList.get(0).getPath()) + .describedAs("path returned should match with the input path") + .isEqualTo(f); + Assertions.assertThat(statusList.get(0).isFile()) + .describedAs("path returned should be a file") + .isEqualTo(true); + } + @Test public void testListFilesFile() throws Throwable { describe("test the listStatus(path) on a file"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 7a8f0830eda37..90e12a84487a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -26,8 +26,10 @@ import java.util.HashMap; import java.util.Map; import java.util.Random; +import java.util.concurrent.CompletableFuture; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; import org.slf4j.Logger; @@ -35,22 +37,32 @@ import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.CommonPathCapabilities; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.MultipartUploader; -import org.apache.hadoop.fs.MultipartUploaderFactory; import org.apache.hadoop.fs.PartHandle; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathHandle; import org.apache.hadoop.fs.UploadHandle; +import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.DurationInfo; import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyPathExists; +import static org.apache.hadoop.fs.impl.FutureIOSupport.awaitFuture; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +/** + * Tests of multipart uploads. + *
    + * Note: some of the tests get a random uploader between + * the two which are available. If tests fail intermittently, + * it may be because different uploaders are being selected. + */ public abstract class AbstractContractMultipartUploaderTest extends AbstractFSContractTestBase { @@ -63,36 +75,46 @@ public abstract class AbstractContractMultipartUploaderTest extends */ protected static final int SMALL_FILE = 100; - private MultipartUploader mpu; - private MultipartUploader mpu2; + protected static final int CONSISTENCY_INTERVAL = 1000; + + private MultipartUploader uploader0; + private MultipartUploader uploader1; private final Random random = new Random(); private UploadHandle activeUpload; private Path activeUploadPath; - protected String getMethodName() { - return methodName.getMethodName(); - } - @Override public void setup() throws Exception { super.setup(); - Configuration conf = getContract().getConf(); - mpu = MultipartUploaderFactory.get(getFileSystem(), conf); - mpu2 = MultipartUploaderFactory.get(getFileSystem(), conf); + + final FileSystem fs = getFileSystem(); + Path testPath = getContract().getTestPath(); + uploader0 = fs.createMultipartUploader(testPath).build(); + uploader1 = fs.createMultipartUploader(testPath).build(); } @Override public void teardown() throws Exception { - if (mpu!= null && activeUpload != null) { + MultipartUploader uploader = getUploader(1); + if (uploader != null) { + if (activeUpload != null) { + abortUploadQuietly(activeUpload, activeUploadPath); + } try { - mpu.abort(activeUploadPath, activeUpload); - } catch (FileNotFoundException ignored) { - /* this is fine */ + // round off with an abort of all uploads + Path teardown = getContract().getTestPath(); + LOG.info("Teardown: aborting outstanding uploads under {}", teardown); + CompletableFuture f + = uploader.abortUploadsUnderPath(teardown); + f.get(); + LOG.info("Statistics {}", + ioStatisticsSourceToString(uploader)); } catch (Exception e) { - LOG.info("in teardown", e); + LOG.warn("Exeception in teardown", e); } } - cleanupWithLogger(LOG, mpu, mpu2); + + cleanupWithLogger(LOG, uploader0, uploader1); super.teardown(); } @@ -192,16 +214,16 @@ protected int timeToBecomeConsistentMillis() { * @param index index of upload * @return an uploader */ - protected MultipartUploader mpu(int index) { - return (index % 2 == 0) ? mpu : mpu2; + protected MultipartUploader getUploader(int index) { + return (index % 2 == 0) ? uploader0 : uploader1; } /** * Pick a multipart uploader at random. * @return an uploader */ - protected MultipartUploader randomMpu() { - return mpu(random.nextInt(10)); + protected MultipartUploader getRandomUploader() { + return getUploader(random.nextInt(10)); } /** @@ -211,39 +233,71 @@ protected MultipartUploader randomMpu() { @Test public void testSingleUpload() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); int size = SMALL_FILE; byte[] payload = generatePayload(1, size); origDigest.update(payload); + // use a single uploader + // note: the same is used here as it found a bug in the S3Guard + // DDB bulk operation state upload -the previous operation had + // added an entry to the ongoing state; this second call + // was interpreted as an inconsistent write. 
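Stripped of the test scaffolding, the builder-based uploader API used below is asynchronous end to end; a condensed single-part sketch, assuming fs, dest and payload are in scope and that blocking on the returned futures is acceptable:

    // Sketch: one-part upload through FileSystem.createMultipartUploader().
    MultipartUploader up = fs.createMultipartUploader(dest).build();
    UploadHandle handle = up.startUpload(dest).get();
    Map<Integer, PartHandle> parts = new HashMap<>();
    parts.put(1, up.putPart(handle, 1, dest,
        new ByteArrayInputStream(payload), payload.length).get());
    PathHandle id = up.complete(handle, dest, parts).get();  // the file becomes visible here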
+ MultipartUploader completer = uploader0; + // and upload with uploader 1 to validate cross-uploader uploads PartHandle partHandle = putPart(file, uploadHandle, 1, payload); partHandles.put(1, partHandle); - PathHandle fd = completeUpload(file, uploadHandle, partHandles, - origDigest, - size); + PathHandle fd = complete(completer, uploadHandle, file, + partHandles); + + validateUpload(file, origDigest, size); + // verify that if the implementation processes data immediately + // then a second attempt at the upload will fail. if (finalizeConsumesUploadIdImmediately()) { intercept(FileNotFoundException.class, - () -> mpu.complete(file, partHandles, uploadHandle)); + () -> complete(completer, uploadHandle, file, partHandles)); } else { - PathHandle fd2 = mpu.complete(file, partHandles, uploadHandle); + // otherwise, the same or other uploader can try again. + PathHandle fd2 = complete(completer, uploadHandle, file, partHandles); assertArrayEquals("Path handles differ", fd.toByteArray(), fd2.toByteArray()); } } /** - * Initialize an upload. + * Complete IO for a specific uploader; await the response. + * @param uploader uploader + * @param uploadHandle Identifier + * @param file Target path for upload + * @param partHandles handles map of part number to part handle + * @return unique PathHandle identifier for the uploaded file. + */ + protected PathHandle complete( + final MultipartUploader uploader, + final UploadHandle uploadHandle, + final Path file, + final Map partHandles) + throws IOException { + try (DurationInfo d = + new DurationInfo(LOG, "Complete upload to %s", file)) { + return awaitFuture( + uploader.complete(uploadHandle, file, partHandles)); + } + } + + /** + * start an upload. * This saves the path and upload handle as the active * upload, for aborting in teardown * @param dest destination * @return the handle * @throws IOException failure to initialize */ - protected UploadHandle initializeUpload(final Path dest) throws IOException { + protected UploadHandle startUpload(final Path dest) throws IOException { activeUploadPath = dest; - activeUpload = randomMpu().initialize(dest); + activeUpload = awaitFuture(getRandomUploader().startUpload(dest)); return activeUpload; } @@ -283,12 +337,17 @@ protected PartHandle putPart(final Path file, final int index, final byte[] payload) throws IOException { ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - PartHandle partHandle = mpu(index) - .putPart(file, - new ByteArrayInputStream(payload), - index, - uploadHandle, - payload.length); + PartHandle partHandle; + try (DurationInfo d = + new DurationInfo(LOG, "Put part %d (size %s) %s", + index, + payload.length, + file)) { + partHandle = awaitFuture(getUploader(index) + .putPart(uploadHandle, index, file, + new ByteArrayInputStream(payload), + payload.length)); + } timer.end("Uploaded part %s", index); LOG.info("Upload bandwidth {} MB/s", timer.bandwidthDescription(payload.length)); @@ -296,7 +355,7 @@ protected PartHandle putPart(final Path file, } /** - * Complete an upload with the active MPU instance. + * Complete an upload with a random uploader. 
* @param file destination * @param uploadHandle handle * @param partHandles map of handles @@ -312,36 +371,64 @@ private PathHandle completeUpload(final Path file, final int expectedLength) throws IOException { PathHandle fd = complete(file, uploadHandle, partHandles); - FileStatus status = verifyPathExists(getFileSystem(), - "Completed file", file); - assertEquals("length of " + status, - expectedLength, status.getLen()); + validateUpload(file, origDigest, expectedLength); + return fd; + } + + /** + * Complete an upload with a random uploader. + * @param file destination + * @param origDigest digest of source data (may be null) + * @param expectedLength expected length of result. + * @throws IOException IO failure + */ + private void validateUpload(final Path file, + final MessageDigest origDigest, + final int expectedLength) throws IOException { + verifyPathExists(getFileSystem(), + "Completed file", file); + verifyFileLength(file, expectedLength); if (origDigest != null) { verifyContents(file, origDigest, expectedLength); } - return fd; } /** * Verify the contents of a file. * @param file path * @param origDigest digest - * @param expectedLength expected length (for logging B/W) + * @param expectedLength expected length (for logging download bandwidth) * @throws IOException IO failure */ protected void verifyContents(final Path file, final MessageDigest origDigest, final int expectedLength) throws IOException { ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer(); - assertArrayEquals("digest of source and " + file - + " differ", - origDigest.digest(), digest(file)); + Assertions.assertThat(digest(file)) + .describedAs("digest of uploaded file %s", file) + .isEqualTo(origDigest.digest()); timer2.end("Completed digest", file); LOG.info("Download bandwidth {} MB/s", timer2.bandwidthDescription(expectedLength)); } + /** + * Verify the length of a file. + * @param file path + * @param expectedLength expected length + * @throws IOException IO failure + */ + private void verifyFileLength(final Path file, final long expectedLength) + throws IOException { + FileStatus st = getFileSystem().getFileStatus(file); + Assertions.assertThat(st) + .describedAs("Uploaded file %s", st) + .matches(FileStatus::isFile) + .extracting(FileStatus::getLen) + .isEqualTo(expectedLength); + } + /** * Perform the inner complete without verification. * @param file destination path @@ -353,21 +440,37 @@ protected void verifyContents(final Path file, private PathHandle complete(final Path file, final UploadHandle uploadHandle, final Map partHandles) throws IOException { - ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - PathHandle fd = randomMpu().complete(file, partHandles, uploadHandle); - timer.end("Completed upload to %s", file); - return fd; + return complete(getRandomUploader(), uploadHandle, file, + partHandles); } /** * Abort an upload. - * @param file path * @param uploadHandle handle + * @param file path * @throws IOException failure */ - private void abortUpload(final Path file, UploadHandle uploadHandle) + private void abortUpload(UploadHandle uploadHandle, + final Path file) throws IOException { - randomMpu().abort(file, uploadHandle); + try (DurationInfo d = + new DurationInfo(LOG, "Abort upload to %s", file)) { + awaitFuture(getRandomUploader().abort(uploadHandle, file)); + } + } + + /** + * Abort an upload; swallows exceptions. 
+ * @param uploadHandle handle + * @param file path + */ + private void abortUploadQuietly(UploadHandle uploadHandle, Path file) { + try { + abortUpload(uploadHandle, file); + } catch (FileNotFoundException ignored) { + } catch (Exception e) { + LOG.info("aborting {}: {}", file, e.toString()); + } } /** @@ -377,10 +480,10 @@ private void abortUpload(final Path file, UploadHandle uploadHandle) @Test public void testMultipartUpload() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); - final int payloadCount = getTestPayloadCount(); + int payloadCount = getTestPayloadCount(); for (int i = 1; i <= payloadCount; ++i) { PartHandle partHandle = buildAndPutPart(file, uploadHandle, i, origDigest); @@ -400,16 +503,16 @@ public void testMultipartUploadEmptyPart() throws Exception { FileSystem fs = getFileSystem(); Path file = path("testMultipartUpload"); try (MultipartUploader uploader = - MultipartUploaderFactory.get(fs, null)) { - UploadHandle uploadHandle = uploader.initialize(file); + fs.createMultipartUploader(file).build()) { + UploadHandle uploadHandle = uploader.startUpload(file).get(); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); byte[] payload = new byte[0]; origDigest.update(payload); InputStream is = new ByteArrayInputStream(payload); - PartHandle partHandle = uploader.putPart(file, is, 1, uploadHandle, - payload.length); + PartHandle partHandle = awaitFuture( + uploader.putPart(uploadHandle, 1, file, is, payload.length)); partHandles.put(1, partHandle); completeUpload(file, uploadHandle, partHandles, origDigest, 0); } @@ -422,7 +525,7 @@ public void testMultipartUploadEmptyPart() throws Exception { @Test public void testUploadEmptyBlock() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); partHandles.put(1, putPart(file, uploadHandle, 1, new byte[0])); completeUpload(file, uploadHandle, partHandles, null, 0); @@ -435,10 +538,10 @@ public void testUploadEmptyBlock() throws Exception { @Test public void testMultipartUploadReverseOrder() throws Exception { Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); MessageDigest origDigest = DigestUtils.getMd5Digest(); - final int payloadCount = getTestPayloadCount(); + int payloadCount = getTestPayloadCount(); for (int i = 1; i <= payloadCount; ++i) { byte[] payload = generatePayload(i); origDigest.update(payload); @@ -459,7 +562,7 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers() throws Exception { describe("Upload in reverse order and the part numbers are not contiguous"); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); MessageDigest origDigest = DigestUtils.getMd5Digest(); int payloadCount = 2 * getTestPayloadCount(); for (int i = 2; i <= payloadCount; i += 2) { @@ -482,22 +585,22 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers() public void testMultipartUploadAbort() throws Exception { describe("Upload and then abort it before completing"); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); - int end = 10; + UploadHandle 
uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); for (int i = 12; i > 10; i--) { partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null)); } - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); String contents = "ThisIsPart49\n"; int len = contents.getBytes(Charsets.UTF_8).length; InputStream is = IOUtils.toInputStream(contents, "UTF-8"); intercept(IOException.class, - () -> mpu.putPart(file, is, 49, uploadHandle, len)); + () -> awaitFuture( + uploader0.putPart(uploadHandle, 49, file, is, len))); intercept(IOException.class, - () -> mpu.complete(file, partHandles, uploadHandle)); + () -> complete(uploader0, uploadHandle, file, partHandles)); assertPathDoesNotExist("Uploaded file should not exist", file); @@ -505,9 +608,9 @@ public void testMultipartUploadAbort() throws Exception { // consumed by finalization operations (complete, abort). if (finalizeConsumesUploadIdImmediately()) { intercept(FileNotFoundException.class, - () -> abortUpload(file, uploadHandle)); + () -> abortUpload(uploadHandle, file)); } else { - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); } } @@ -519,31 +622,55 @@ public void testAbortUnknownUpload() throws Exception { Path file = methodPath(); ByteBuffer byteBuffer = ByteBuffer.wrap( "invalid-handle".getBytes(Charsets.UTF_8)); - UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer); intercept(FileNotFoundException.class, - () -> abortUpload(file, uploadHandle)); + () -> abortUpload(BBUploadHandle.from(byteBuffer), file)); } /** - * Trying to abort with a handle of size 0 must fail. + * Trying to abort an upload with no data does not create a file. */ @Test public void testAbortEmptyUpload() throws Exception { describe("initialize upload and abort before uploading data"); Path file = methodPath(); - abortUpload(file, initializeUpload(file)); + abortUpload(startUpload(file), file); assertPathDoesNotExist("Uploaded file should not exist", file); } + + /** + * Trying to abort an upload with no data does not create a file. + */ + @Test + public void testAbortAllPendingUploads() throws Exception { + describe("initialize upload and abort the pending upload"); + Path path = methodPath(); + Path file = new Path(path, "child"); + UploadHandle upload = startUpload(file); + try { + CompletableFuture oF + = getRandomUploader().abortUploadsUnderPath(path.getParent()); + int abortedUploads = awaitFuture(oF); + if (abortedUploads >= 0) { + // uploads can be aborted + Assertions.assertThat(abortedUploads) + .describedAs("Number of uploads aborted") + .isGreaterThanOrEqualTo(1); + assertPathDoesNotExist("Uploaded file should not exist", file); + } + } finally { + abortUploadQuietly(upload, file); + } + } + /** * Trying to abort with a handle of size 0 must fail. 
*/ @Test public void testAbortEmptyUploadHandle() throws Exception { ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[0]); - UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer); intercept(IllegalArgumentException.class, - () -> abortUpload(methodPath(), uploadHandle)); + () -> abortUpload(BBUploadHandle.from(byteBuffer), methodPath())); } /** @@ -553,10 +680,10 @@ public void testAbortEmptyUploadHandle() throws Exception { public void testCompleteEmptyUpload() throws Exception { describe("Expect an empty MPU to fail, but still be abortable"); Path dest = methodPath(); - UploadHandle handle = initializeUpload(dest); + UploadHandle handle = startUpload(dest); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, new HashMap<>(), handle)); - abortUpload(dest, handle); + () -> complete(uploader0, handle, dest, new HashMap<>())); + abortUpload(handle, dest); } /** @@ -571,7 +698,7 @@ public void testPutPartEmptyUploadID() throws Exception { byte[] payload = generatePayload(1); InputStream is = new ByteArrayInputStream(payload); intercept(IllegalArgumentException.class, - () -> mpu.putPart(dest, is, 1, emptyHandle, payload.length)); + () -> uploader0.putPart(emptyHandle, 1, dest, is, payload.length)); } /** @@ -581,7 +708,7 @@ public void testPutPartEmptyUploadID() throws Exception { public void testCompleteEmptyUploadID() throws Exception { describe("Expect IllegalArgumentException when complete uploadID is empty"); Path dest = methodPath(); - UploadHandle realHandle = initializeUpload(dest); + UploadHandle realHandle = startUpload(dest); UploadHandle emptyHandle = BBUploadHandle.from(ByteBuffer.wrap(new byte[0])); Map partHandles = new HashMap<>(); @@ -590,14 +717,14 @@ public void testCompleteEmptyUploadID() throws Exception { partHandles.put(1, partHandle); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, partHandles, emptyHandle)); + () -> complete(uploader0, emptyHandle, dest, partHandles)); // and, while things are setup, attempt to complete with // a part index of 0 partHandles.clear(); partHandles.put(0, partHandle); intercept(IllegalArgumentException.class, - () -> mpu.complete(dest, partHandles, realHandle)); + () -> complete(uploader0, realHandle, dest, partHandles)); } /** @@ -610,7 +737,7 @@ public void testCompleteEmptyUploadID() throws Exception { public void testDirectoryInTheWay() throws Exception { FileSystem fs = getFileSystem(); Path file = methodPath(); - UploadHandle uploadHandle = initializeUpload(file); + UploadHandle uploadHandle = startUpload(file); Map partHandles = new HashMap<>(); int size = SMALL_FILE; PartHandle partHandle = putPart(file, uploadHandle, 1, @@ -622,7 +749,7 @@ public void testDirectoryInTheWay() throws Exception { () -> completeUpload(file, uploadHandle, partHandles, null, size)); // abort should still work - abortUpload(file, uploadHandle); + abortUpload(uploadHandle, file); } @Test @@ -630,46 +757,44 @@ public void testConcurrentUploads() throws Throwable { // if the FS doesn't support concurrent uploads, this test is // required to fail during the second initialization. 
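A note on the cleanup idiom behind the abort tests above: a pending upload that will never be completed should be aborted explicitly, and stores that can enumerate pending uploads also allow a bulk abort under a path. A hedged sketch reusing the future-returning signatures from this class, with uploader, handle, dest and dir assumed to be in scope:

    // Sketch: abort one pending upload, then bulk-abort anything left under a directory.
    uploader.abort(handle, dest).get();
    int aborted = uploader.abortUploadsUnderPath(dir).get();
    if (aborted < 0) {
      // negative count: the store cannot enumerate or abort pending uploads in bulk
    }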
- final boolean concurrent = supportsConcurrentUploadsToSamePath(); + boolean concurrent = supportsConcurrentUploadsToSamePath(); describe("testing concurrent uploads, MPU support for this is " + concurrent); - final FileSystem fs = getFileSystem(); - final Path file = methodPath(); - final int size1 = SMALL_FILE; - final int partId1 = 1; - final byte[] payload1 = generatePayload(partId1, size1); - final MessageDigest digest1 = DigestUtils.getMd5Digest(); + Path file = methodPath(); + int size1 = SMALL_FILE; + int partId1 = 1; + byte[] payload1 = generatePayload(partId1, size1); + MessageDigest digest1 = DigestUtils.getMd5Digest(); digest1.update(payload1); - final UploadHandle upload1 = initializeUpload(file); - final Map partHandles1 = new HashMap<>(); + UploadHandle upload1 = startUpload(file); + Map partHandles1 = new HashMap<>(); // initiate part 2 // by using a different size, it's straightforward to see which // version is visible, before reading/digesting the contents - final int size2 = size1 * 2; - final int partId2 = 2; - final byte[] payload2 = generatePayload(partId1, size2); - final MessageDigest digest2 = DigestUtils.getMd5Digest(); + int size2 = size1 * 2; + int partId2 = 2; + byte[] payload2 = generatePayload(partId1, size2); + MessageDigest digest2 = DigestUtils.getMd5Digest(); digest2.update(payload2); - final UploadHandle upload2; + UploadHandle upload2; try { - upload2 = initializeUpload(file); + upload2 = startUpload(file); Assume.assumeTrue( "The Filesystem is unexpectedly supporting concurrent uploads", concurrent); } catch (IOException e) { if (!concurrent) { // this is expected, so end the test - LOG.debug("Expected exception raised on concurrent uploads {}", e); + LOG.debug("Expected exception raised on concurrent uploads", e); return; } else { throw e; } } - final Map partHandles2 = new HashMap<>(); - + Map partHandles2 = new HashMap<>(); assertNotEquals("Upload handles match", upload1, upload2); @@ -685,14 +810,27 @@ public void testConcurrentUploads() throws Throwable { // now upload part 2. complete(file, upload2, partHandles2); - // and await the visible length to match - eventually(timeToBecomeConsistentMillis(), 500, - () -> { - FileStatus status = fs.getFileStatus(file); - assertEquals("File length in " + status, - size2, status.getLen()); - }); + + // and await the visible length to match, if this FS is not + // consistent. 
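The replacement block below only polls when timeToBecomeConsistentMillis() is non-zero; shown standalone, the pattern is a bounded retry loop around the length probe, assuming the LambdaTestUtils helpers imported above and a consistencyDelay/expectedLen already computed:

    // Sketch: wait (with proportional back-off) until the new length is visible, or time out.
    eventually(consistencyDelay,
        () -> assertEquals("visible length of " + file,
            expectedLen, getFileSystem().getFileStatus(file).getLen()),
        new LambdaTestUtils.ProportionalRetryInterval(1000, consistencyDelay));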
+ final int consistencyDelay = timeToBecomeConsistentMillis(); + if (consistencyDelay > 0) { + eventually(consistencyDelay, + () -> verifyFileLength(file, size2), + new LambdaTestUtils.ProportionalRetryInterval( + CONSISTENCY_INTERVAL, + consistencyDelay)); + } verifyContents(file, digest2, size2); } + + @Test + public void testPathCapabilities() throws Throwable { + FileSystem fs = getFileSystem(); + Assertions.assertThat(fs.hasPathCapability(getContract().getTestPath(), + CommonPathCapabilities.FS_MULTIPART_UPLOADER)) + .describedAs("fs %s, lacks multipart upload capability", fs) + .isTrue(); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java index 78ff2541483a3..e032604b5788c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java @@ -104,29 +104,43 @@ public void testRenameFileOverExistingFile() throws Throwable { assertIsFile(destFile); boolean renameOverwritesDest = isSupported(RENAME_OVERWRITES_DEST); boolean renameReturnsFalseOnRenameDestExists = - !isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS); + isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS); + assertFalse(RENAME_OVERWRITES_DEST + " and " + + RENAME_RETURNS_FALSE_IF_DEST_EXISTS + " cannot be both supported", + renameOverwritesDest && renameReturnsFalseOnRenameDestExists); + String expectedTo = "expected rename(" + srcFile + ", " + destFile + ") to "; + boolean destUnchanged = true; try { + // rename is rejected by returning 'false' or throwing an exception boolean renamed = rename(srcFile, destFile); + destUnchanged = !renamed; if (renameOverwritesDest) { - // the filesystem supports rename(file, file2) by overwriting file2 - - assertTrue("Rename returned false", renamed); - destUnchanged = false; + assertTrue(expectedTo + "overwrite destination, but got false", + renamed); + } else if (renameReturnsFalseOnRenameDestExists) { + assertFalse(expectedTo + "be rejected with false, but destination " + + "was overwritten", renamed); + } else if (renamed) { + String destDirLS = generateAndLogErrorListing(srcFile, destFile); + getLogger().error("dest dir {}", destDirLS); + + fail(expectedTo + "be rejected with exception, but got overwritten"); } else { - // rename is rejected by returning 'false' or throwing an exception - if (renamed && !renameReturnsFalseOnRenameDestExists) { - //expected an exception - String destDirLS = generateAndLogErrorListing(srcFile, destFile); - getLogger().error("dest dir {}", destDirLS); - fail("expected rename(" + srcFile + ", " + destFile + " ) to fail," + - " but got success and destination of " + destDirLS); - } + fail(expectedTo + "be rejected with exception, but got false"); } } catch (FileAlreadyExistsException e) { + // rename(file, file2) should throw exception iff + // it neither overwrites nor returns false + assertFalse(expectedTo + "overwrite destination, but got exception", + renameOverwritesDest); + assertFalse(expectedTo + "be rejected with false, but got exception", + renameReturnsFalseOnRenameDestExists); + handleExpectedException(e); } + // verify that the destination file is as expected based on the expected // outcome verifyFileContents(getFileSystem(), destFile, diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java index 27c6933ae1885..4b5af02ecdad3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java @@ -22,13 +22,16 @@ import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.junit.Test; +import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.RemoteIterator; @@ -39,6 +42,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.deleteChildren; import static org.apache.hadoop.fs.contract.ContractTestUtils.dumpStats; +import static org.apache.hadoop.fs.contract.ContractTestUtils.iteratorToList; import static org.apache.hadoop.fs.contract.ContractTestUtils.listChildren; import static org.apache.hadoop.fs.contract.ContractTestUtils.toList; import static org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk; @@ -242,6 +246,13 @@ public void testSimpleRootListing() throws IOException { + "listStatus = " + listStatusResult + "listFiles = " + listFilesResult, fileList.size() <= statuses.length); + List statusList = (List) iteratorToList( + fs.listStatusIterator(root)); + Assertions.assertThat(statusList) + .describedAs("Result of listStatus(/) and listStatusIterator(/)" + + " must match") + .hasSameElementsAs(Arrays.stream(statuses) + .collect(Collectors.toList())); } @Test @@ -254,7 +265,7 @@ public void testRecursiveRootListing() throws IOException { fs.listFiles(root, true)); describe("verifying consistency with treewalk's files"); ContractTestUtils.TreeScanResults treeWalk = treeWalk(fs, root); - treeWalk.assertFieldsEquivalent("files", listing, + treeWalk.assertFieldsEquivalent("treewalk vs listFiles(/, true)", listing, treeWalk.getFiles(), listing.getFiles()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index ca8e4a053beac..d34178489c81d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -317,7 +317,7 @@ public void testPositionedBulkReadDoesntChangePosition() throws Throwable { int v = 256; byte[] readBuffer = new byte[v]; - assertEquals(v, instream.read(128, readBuffer, 0, v)); + instream.readFully(128, readBuffer, 0, v); //have gone back assertEquals(40000, instream.getPos()); //content is the same too @@ -572,8 +572,7 @@ public void testReadSmallFile() throws Throwable { // now read the entire file in one go byte[] fullFile = new byte[TEST_FILE_LEN]; - assertEquals(TEST_FILE_LEN, - instream.read(0, fullFile, 0, fullFile.length)); + instream.readFully(0, 
fullFile, 0, fullFile.length); assertEquals(0, instream.getPos()); // now read past the end of the file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java new file mode 100644 index 0000000000000..89b21c497083b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java @@ -0,0 +1,313 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.contract; + +import java.util.Collections; +import java.util.List; + +import org.assertj.core.api.Assertions; +import org.junit.AfterClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatisticsSource; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_WRITE_BYTES; + +/** + * Tests {@link IOStatistics} support in input and output streams. + *
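Before the new test class, a short orientation: any stream implementing IOStatisticsSource exposes named counters that stay readable after close(), which is what the assertions below build on. A minimal sketch using only the helpers this file imports, with fs, path and bytes assumed to be in scope:

    // Sketch: read and log a stream's write statistics once the data is written out.
    FSDataOutputStream out = fs.create(path, true);
    out.write(bytes);
    out.close();                                   // counters remain valid after close()
    IOStatistics stats = extractStatistics(out);   // fails if the stream has no statistics
    verifyStatisticCounterValue(stats, STREAM_WRITE_BYTES, bytes.length);
    LOG.info("write statistics: {}", ioStatisticsToPrettyString(stats));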
    + * Requires both the input and output streams to offer the basic + * bytes read/written statistics. + *
    + * If the IO is buffered, that information must be provided, + * especially the input buffer size. + */ +public abstract class AbstractContractStreamIOStatisticsTest + extends AbstractFSContractTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(AbstractContractStreamIOStatisticsTest.class); + + /** + * FileSystem statistics are collected across every test case. + */ + protected static final IOStatisticsSnapshot FILESYSTEM_IOSTATS = + snapshotIOStatistics(); + + @Override + public void teardown() throws Exception { + final FileSystem fs = getFileSystem(); + if (fs instanceof IOStatisticsSource) { + FILESYSTEM_IOSTATS.aggregate(((IOStatisticsSource)fs).getIOStatistics()); + } + super.teardown(); + } + + /** + * Dump the filesystem statistics after the class if contains any values. + */ + @AfterClass + public static void dumpFileSystemIOStatistics() { + if (!FILESYSTEM_IOSTATS.counters().isEmpty()) { + // if there is at least one counter + LOG.info("Aggregate FileSystem Statistics {}", + ioStatisticsToPrettyString(FILESYSTEM_IOSTATS)); + } + } + + @Test + public void testOutputStreamStatisticKeys() throws Throwable { + describe("Look at the statistic keys of an output stream"); + Path path = methodPath(); + FileSystem fs = getFileSystem(); + fs.mkdirs(path.getParent()); + try (FSDataOutputStream out = fs.create(path, true)) { + IOStatistics statistics = extractStatistics(out); + final List keys = outputStreamStatisticKeys(); + Assertions.assertThat(statistics.counters().keySet()) + .describedAs("statistic keys of %s", statistics) + .containsAll(keys); + Assertions.assertThat(keys) + .describedAs("Statistics supported by the stream %s", out) + .contains(STREAM_WRITE_BYTES); + } finally { + fs.delete(path, false); + } + } + + /** + * If the stream writes in blocks, then counters during the write may be + * zero until a whole block is written -or the write has finished. + * @return true if writes are buffered into whole blocks. + */ + public boolean streamWritesInBlocks() { + return false; + } + + @Test + public void testWriteSingleByte() throws Throwable { + describe("Write a byte to a file and verify" + + " the stream statistics are updated"); + Path path = methodPath(); + FileSystem fs = getFileSystem(); + fs.mkdirs(path.getParent()); + boolean writesInBlocks = streamWritesInBlocks(); + try (FSDataOutputStream out = fs.create(path, true)) { + IOStatistics statistics = extractStatistics(out); + // before a write, no bytes + verifyStatisticCounterValue(statistics, STREAM_WRITE_BYTES, 0); + out.write('0'); + verifyStatisticCounterValue(statistics, STREAM_WRITE_BYTES, + writesInBlocks ? 
0 : 1); + // close the stream + out.close(); + // statistics are still valid after the close + // always call the output stream to check that behavior + statistics = extractStatistics(out); + final String strVal = statistics.toString(); + LOG.info("Statistics = {}", strVal); + verifyStatisticCounterValue(statistics, STREAM_WRITE_BYTES, 1); + } finally { + fs.delete(path, false); + } + } + + @Test + public void testWriteByteArrays() throws Throwable { + describe("Write byte arrays to a file and verify" + + " the stream statistics are updated"); + Path path = methodPath(); + FileSystem fs = getFileSystem(); + fs.mkdirs(path.getParent()); + boolean writesInBlocks = streamWritesInBlocks(); + try (FSDataOutputStream out = fs.create(path, true)) { + Object demandStatsString = demandStringifyIOStatisticsSource(out); + // before a write, no bytes + final byte[] bytes = ContractTestUtils.toAsciiByteArray( + "statistically-speaking"); + final long len = bytes.length; + out.write(bytes); + out.flush(); + LOG.info("stats {}", demandStatsString); + IOStatistics statistics = extractStatistics(out); + verifyStatisticCounterValue(statistics, STREAM_WRITE_BYTES, + writesInBlocks ? 0 : len); + out.write(bytes); + out.flush(); + verifyStatisticCounterValue(statistics, STREAM_WRITE_BYTES, + writesInBlocks ? 0 : len * 2); + // close the stream + out.close(); + LOG.info("stats {}", demandStatsString); + // statistics are still valid after the close + // always call the output stream to check that behavior + statistics = extractStatistics(out); + verifyStatisticCounterValue(statistics, STREAM_WRITE_BYTES, len * 2); + // the to string value must contain the same counters. + Assertions.assertThat(demandStatsString.toString()) + .contains(Long.toString(len * 2)); + } finally { + fs.delete(path, false); + } + } + + @Test + public void testInputStreamStatisticKeys() throws Throwable { + describe("Look at the statistic keys of an input stream"); + Path path = methodPath(); + FileSystem fs = getFileSystem(); + ContractTestUtils.touch(fs, path); + try (FSDataInputStream in = fs.open(path)) { + IOStatistics statistics = extractStatistics(in); + final List keys = inputStreamStatisticKeys(); + Assertions.assertThat(statistics.counters().keySet()) + .describedAs("statistic keys of %s", statistics) + .containsAll(keys); + Assertions.assertThat(keys) + .describedAs("Statistics supported by the stream %s", in) + .contains(STREAM_READ_BYTES); + verifyStatisticCounterValue(statistics, STREAM_READ_BYTES, 0); + } finally { + fs.delete(path, false); + } + } + + @Test + public void testInputStreamStatisticRead() throws Throwable { + describe("Read Data from an input stream"); + Path path = methodPath(); + FileSystem fs = getFileSystem(); + final int fileLen = 1024; + final byte[] ds = dataset(fileLen, 'a', 26); + ContractTestUtils.writeDataset(fs, path, ds, fileLen, 8_000, true); + + try (FSDataInputStream in = fs.open(path)) { + long current = 0; + IOStatistics statistics = extractStatistics(in); + verifyStatisticCounterValue(statistics, STREAM_READ_BYTES, 0); + Assertions.assertThat(in.read()).isEqualTo('a'); + int bufferSize = readBufferSize(); + // either a single byte was read or a whole block + current = verifyBytesRead(statistics, current, 1, bufferSize); + final int bufferLen = 128; + byte[] buf128 = new byte[bufferLen]; + in.read(buf128); + current = verifyBytesRead(statistics, current, bufferLen, bufferSize); + in.readFully(buf128); + current = verifyBytesRead(statistics, current, bufferLen, bufferSize); + 
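The assertions above rely on block-size accounting; a minimal standalone sketch of the calculation performed by the verifyBytesRead() helper defined further down (the values in the comments are illustrative only):

  /** Expected STREAM_READ_BYTES after an explicit read (sketch of verifyBytesRead's arithmetic). */
  static long expectedBytesRead(long current, int bytesRead, int bufferSize) {
    if (bufferSize > 0) {
      // buffered stream: whole buffers consumed so far, plus the buffer now being filled
      return bufferSize * (1 + (current / bufferSize));   // e.g. current=0, bufferSize=1024 -> 1024
    }
    return current + bytesRead;                           // unbuffered: exact byte count
  }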
in.readFully(0, buf128); + current = verifyBytesRead(statistics, current, bufferLen, bufferSize); + // seek must not increment the read counter + in.seek(256); + verifyBytesRead(statistics, current, 0, bufferSize); + + // if a stream implements lazy-seek the seek operation + // may be postponed until the read + final int sublen = 32; + Assertions.assertThat(in.read(buf128, 0, sublen)) + .isEqualTo(sublen); + current = verifyBytesRead(statistics, current, sublen, bufferSize); + + // perform some read operations near the end of the file such that + // the buffer will not be completely read. + // skip these tests for buffered IO as it is too complex to work out + if (bufferSize == 0) { + final int pos = fileLen - sublen; + in.seek(pos); + Assertions.assertThat(in.read(buf128)) + .describedAs("Read overlapping EOF") + .isEqualTo(sublen); + current = verifyStatisticCounterValue(statistics, STREAM_READ_BYTES, + current + sublen); + Assertions.assertThat(in.read(pos, buf128, 0, bufferLen)) + .describedAs("Read(buffer) overlapping EOF") + .isEqualTo(sublen); + verifyStatisticCounterValue(statistics, STREAM_READ_BYTES, + current + sublen); + } + } finally { + fs.delete(path, false); + } + } + + /** + * Verify the bytes read value, taking into account block size. + * @param statistics stats + * @param current current count + * @param bytesRead bytes explicitly read + * @param bufferSize buffer size of stream + * @return the current count of bytes read ignoring block size + */ + public long verifyBytesRead(final IOStatistics statistics, + final long current, + final int bytesRead, final int bufferSize) { + // final position. for unbuffered read, this is the expected value + long finalPos = current + bytesRead; + long expected = finalPos; + if (bufferSize > 0) { + // buffered. count of read is number of buffers already read + // plus the current buffer, multiplied by that buffer size + expected = bufferSize * (1 + (current / bufferSize)); + } + verifyStatisticCounterValue(statistics, STREAM_READ_BYTES, expected); + return finalPos; + } + + /** + * Buffer size for reads. + * Filesystems performing block reads (checksum, etc) + * must return their buffer value is + * @return buffer capacity; 0 for unbuffered + */ + public int readBufferSize() { + return 0; + } + + /** + * Keys which the output stream must support. + * @return a list of keys + */ + public List outputStreamStatisticKeys() { + return Collections.singletonList(STREAM_WRITE_BYTES); + } + + /** + * Keys which the input stream must support. + * @return a list of keys + */ + public List inputStreamStatisticKeys() { + return Collections.singletonList(STREAM_READ_BYTES); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index 5eb1e892f83d5..adaf0a910c620 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -137,7 +137,8 @@ protected void validateFileContents(FSDataInputStream stream, int length, throws IOException { byte[] streamData = new byte[length]; assertEquals("failed to read expected number of bytes from " - + "stream", length, stream.read(streamData)); + + "stream. 
This may be transient", + length, stream.read(streamData)); byte[] validateFileBytes; if (startIndex == 0 && length == fileBytes.length) { validateFileBytes = fileBytes; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java index f09496a6082c8..76d3116c3abdc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java @@ -69,6 +69,14 @@ public void init() throws IOException { } + /** + * Any teardown logic can go here. + * @throws IOException IO problems + */ + public void teardown() throws IOException { + + } + /** * Add a configuration resource to this instance's configuration * @param resource resource reference @@ -113,7 +121,7 @@ public FileSystem getFileSystem(URI uri) throws IOException { public abstract FileSystem getTestFileSystem() throws IOException; /** - * Get the scheme of this FS + * Get the scheme of this FS. * @return the scheme this FS supports */ public abstract String getScheme(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java index 60373f67992eb..ac9de6d7bfe8c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java @@ -213,6 +213,9 @@ public void teardown() throws Exception { Thread.currentThread().setName("teardown"); LOG.debug("== Teardown =="); deleteTestDirInTeardown(); + if (contract != null) { + contract.teardown(); + } LOG.debug("== Teardown complete =="); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java index 3f31c07742c59..29cd29dfaf225 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java @@ -241,4 +241,19 @@ public interface ContractOptions { */ String TEST_RANDOM_SEEK_COUNT = "test.random-seek-count"; + /** + * Is hflush supported in API and StreamCapabilities? + */ + String SUPPORTS_HFLUSH = "supports-hflush"; + + /** + * Is hsync supported in API and StreamCapabilities? + */ + String SUPPORTS_HSYNC = "supports-hsync"; + + /** + * Is the metadata updated after an hsync? + * HDFS does not do this. 
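A sketch of how a contract test might consume these new option keys (the third key is declared immediately below); it assumes the existing isSupported() helper of AbstractFSContractTestBase and JUnit's Assume:

  // skip hsync assertions on filesystems whose contract does not declare support
  boolean hflushSupported = isSupported(SUPPORTS_HFLUSH, false);
  boolean hsyncSupported = isSupported(SUPPORTS_HSYNC, false);
  boolean metadataUpdated = isSupported(METADATA_UPDATED_ON_HSYNC, false);
  Assume.assumeTrue("hsync not supported by this filesystem", hsyncSupported);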
+ */ + String METADATA_UPDATED_ON_HSYNC = "metadata_updated_on_hsync"; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java index 4789630f95f1c..35193fa2dc712 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java @@ -233,8 +233,8 @@ public static byte[] readDataset(FileSystem fs, Path path, int len) public static void verifyFileContents(FileSystem fs, Path path, byte[] original) throws IOException { - assertIsFile(fs, path); FileStatus stat = fs.getFileStatus(path); + assertIsFile(path, stat); String statText = stat.toString(); assertEquals("wrong length " + statText, original.length, stat.getLen()); byte[] bytes = readDataset(fs, path, original.length); @@ -1453,6 +1453,52 @@ public static List toList( return list; } + /** + * Convert a remote iterator over file status results into a list. + * The utility equivalents in commons collection and guava cannot be + * used here, as this is a different interface, one whose operators + * can throw IOEs. + * @param iterator input iterator + * @return the file status entries as a list. + * @throws IOException + */ + public static List iteratorToList( + RemoteIterator iterator) throws IOException { + List list = new ArrayList<>(); + while (iterator.hasNext()) { + list.add(iterator.next()); + } + return list; + } + + + /** + * Convert a remote iterator over file status results into a list. + * This uses {@link RemoteIterator#next()} calls only, expecting + * a raised {@link NoSuchElementException} exception to indicate that + * the end of the listing has been reached. This iteration strategy is + * designed to verify that the implementation of the remote iterator + * generates results and terminates consistently with the {@code hasNext/next} + * iteration. More succinctly "verifies that the {@code next()} operator + * isn't relying on {@code hasNext()} to always be called during an iteration. + * @param iterator input iterator + * @return the status entries as a list. + * @throws IOException IO problems + */ + @SuppressWarnings("InfiniteLoopStatement") + public static List iteratorToListThroughNextCallsAlone( + RemoteIterator iterator) throws IOException { + List list = new ArrayList<>(); + try { + while (true) { + list.add(iterator.next()); + } + } catch (NoSuchElementException expected) { + // ignored + } + return list; + } + /** * Convert a remote iterator over file status results into a list. 
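A short usage sketch for the two conversion helpers above: list the same directory twice and check that iterating through next() alone surfaces the same entries as the hasNext()/next() form. Here fs and dir are assumed to be a test filesystem and directory supplied by the calling test.

  RemoteIterator<FileStatus> viaNext = fs.listStatusIterator(dir);
  List<FileStatus> nextOnly = iteratorToListThroughNextCallsAlone(viaNext);
  List<FileStatus> hasNextNext = iteratorToList(fs.listStatusIterator(dir));
  // both iteration strategies must yield the same listing
  Assertions.assertThat(nextOnly).hasSameSizeAs(hasNextNext);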
* This uses {@link RemoteIterator#next()} calls only, expecting @@ -1496,19 +1542,49 @@ public static void assertCapabilities( StreamCapabilities source = (StreamCapabilities) stream; if (shouldHaveCapabilities != null) { for (String shouldHaveCapability : shouldHaveCapabilities) { - assertTrue("Should have capability: " + shouldHaveCapability, + assertTrue("Should have capability: " + shouldHaveCapability + + " in " + source, source.hasCapability(shouldHaveCapability)); } } if (shouldNotHaveCapabilities != null) { for (String shouldNotHaveCapability : shouldNotHaveCapabilities) { - assertFalse("Should not have capability: " + shouldNotHaveCapability, + assertFalse("Should not have capability: " + shouldNotHaveCapability + + " in " + source, source.hasCapability(shouldNotHaveCapability)); } } } + + /** + * Custom assert to verify capabilities supported by + * an object through {@link StreamCapabilities}. + * + * @param source The object to test for StreamCapabilities + * @param capabilities The list of expected capabilities + */ + public static void assertHasStreamCapabilities( + final Object source, + final String... capabilities) { + assertCapabilities(source, capabilities, null); + } + + /** + * Custom assert to verify capabilities NOT supported by + * an object through {@link StreamCapabilities}. + * + * @param source The object to test for StreamCapabilities + * @param capabilities The list of capabilities which must not be + * supported. + */ + public static void assertLacksStreamCapabilities( + final Object source, + final String... capabilities) { + assertCapabilities(source, null, capabilities); + } + /** * Custom assert to test {@link PathCapabilities}. * @@ -1523,7 +1599,8 @@ public static void assertHasPathCapabilities( for (String shouldHaveCapability: capabilities) { assertTrue("Should have capability: " + shouldHaveCapability - + " under " + path, + + " under " + path + + " in " + source, source.hasPathCapability(path, shouldHaveCapability)); } } @@ -1602,7 +1679,7 @@ public TreeScanResults(Path basePath) { * @param results results of the listFiles/listStatus call. * @throws IOException IO problems during the iteration. 
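The new wrappers reduce capability checks to one line each. A hedged sketch, assuming fs and path come from the calling test, that the filesystem's output streams declare the standard hflush/hsync capabilities, and using an invented name for the negative probe:

  try (FSDataOutputStream out = fs.create(path, true)) {
    // positive and negative probes through the StreamCapabilities interface
    assertHasStreamCapabilities(out, StreamCapabilities.HFLUSH, StreamCapabilities.HSYNC);
    assertLacksStreamCapabilities(out, "fictional-capability");
  }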
*/ - public TreeScanResults(RemoteIterator results) + public TreeScanResults(RemoteIterator results) throws IOException { while (results.hasNext()) { add(results.next()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java index f8eeb961e92ff..3cea68c221000 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java @@ -18,7 +18,10 @@ package org.apache.hadoop.fs.contract.localfs; +import org.junit.Test; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; @@ -29,4 +32,17 @@ protected AbstractFSContract createContract(Configuration conf) { return new LocalFSContract(conf); } + @Test + public void testSyncablePassthroughIfChecksumDisabled() throws Throwable { + describe("Create an instance of the local fs, disable the checksum" + + " and verify that Syncable now works"); + LocalFileSystem fs = (LocalFileSystem) getFileSystem(); + try (LocalFileSystem lfs = new LocalFileSystem( + fs.getRawFileSystem())) { + // disable checksumming output + lfs.setWriteChecksum(false); + // now the filesystem supports Sync with immediate update of file status + validateSyncableSemantics(lfs, true, true, true); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java deleted file mode 100644 index f675ddfa0db82..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractMultipartUploader.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.contract.localfs; - -import org.junit.Assume; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -/** - * Test the FileSystemMultipartUploader on local file system. 
- */ -public class TestLocalFSContractMultipartUploader - extends AbstractContractMultipartUploaderTest { - - @Override - public void setup() throws Exception { - Assume.assumeTrue("Skipping until HDFS-13934", false); - super.setup(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new LocalFSContract(conf); - } - - /** - * There is no real need to upload any particular size. - * @return 1 kilobyte - */ - @Override - protected int partSizeInBytes() { - return 1024; - } - - @Override - protected boolean finalizeConsumesUploadIdImmediately() { - return true; - } - - @Override - protected boolean supportsConcurrentUploadsToSamePath() { - return true; - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractStreamIOStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractStreamIOStatistics.java new file mode 100644 index 0000000000000..642baec502d2e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractStreamIOStatistics.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.contract.localfs; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractStreamIOStatisticsTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; + +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_EXCEPTIONS; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SKIP_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_SKIP_OPERATIONS; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_WRITE_BYTES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_WRITE_EXCEPTIONS; + +/** + * Test IOStatistics through the local FS. + */ +public class TestLocalFSContractStreamIOStatistics extends + AbstractContractStreamIOStatisticsTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new LocalFSContract(conf); + } + + /** + * Keys which the input stream must support. 
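Other filesystem modules can bind to the abstract stream statistics test in the same way as the local FS binding that follows; a sketch with invented class names:

  public class TestMyFSContractStreamIOStatistics
      extends AbstractContractStreamIOStatisticsTest {

    @Override
    protected AbstractFSContract createContract(Configuration conf) {
      return new MyFSContract(conf);   // hypothetical contract implementation
    }

    @Override
    public int readBufferSize() {
      return 0;   // unbuffered reads: byte counters update on every read
    }
  }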
+ * @return a list of keys + */ + public List inputStreamStatisticKeys() { + return Arrays.asList(STREAM_READ_BYTES, + STREAM_READ_EXCEPTIONS, + STREAM_READ_SEEK_OPERATIONS, + STREAM_READ_SKIP_OPERATIONS, + STREAM_READ_SKIP_BYTES); + } + + /** + * Keys which the output stream must support. + * @return a list of keys + */ + @Override + public List outputStreamStatisticKeys() { + return Arrays.asList(STREAM_WRITE_BYTES, + STREAM_WRITE_EXCEPTIONS); + } + + @Override + public int readBufferSize() { + return 1024; + } + + @Override + public boolean streamWritesInBlocks() { + return true; + } + + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java new file mode 100644 index 0000000000000..f72a2aec86242 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/SFTPContract.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.contract.sftp; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.sftp.SFTPFileSystem; +import org.apache.sshd.common.NamedFactory; +import org.apache.sshd.server.SshServer; +import org.apache.sshd.server.auth.UserAuth; +import org.apache.sshd.server.auth.password.UserAuthPasswordFactory; +import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider; +import org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory; + +public class SFTPContract extends AbstractFSContract { + + private static final String CONTRACT_XML = "contract/sftp.xml"; + private static final URI TEST_URI = + URI.create("sftp://user:password@localhost"); + private final String testDataDir = + new FileSystemTestHelper().getTestRootDir(); + private final Configuration conf; + private SshServer sshd; + + public SFTPContract(Configuration conf) { + super(conf); + addConfResource(CONTRACT_XML); + this.conf = conf; + } + + @Override + public void init() throws IOException { + sshd = SshServer.setUpDefaultServer(); + // ask OS to assign a port + sshd.setPort(0); + sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider()); + + List> userAuthFactories = new ArrayList<>(); + userAuthFactories.add(new UserAuthPasswordFactory()); + + sshd.setUserAuthFactories(userAuthFactories); + sshd.setPasswordAuthenticator((username, password, session) -> + username.equals("user") && password.equals("password") + ); + + sshd.setSubsystemFactories( + Collections.singletonList(new SftpSubsystemFactory())); + + sshd.start(); + int port = sshd.getPort(); + + conf.setClass("fs.sftp.impl", SFTPFileSystem.class, FileSystem.class); + conf.setInt("fs.sftp.host.port", port); + conf.setBoolean("fs.sftp.impl.disable.cache", true); + } + + @Override + public void teardown() throws IOException { + if (sshd != null) { + sshd.stop(); + } + } + + @Override + public FileSystem getTestFileSystem() throws IOException { + return FileSystem.get(TEST_URI, conf); + } + + @Override + public String getScheme() { + return "sftp"; + } + + @Override + public Path getTestPath() { + try { + FileSystem fs = FileSystem.get( + URI.create("sftp://user:password@localhost"), conf + ); + return fs.makeQualified(new Path(testDataDir)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java new file mode 100644 index 0000000000000..20f4116b98019 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/sftp/TestSFTPContractSeek.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.contract.sftp; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractSeekTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; + +public class TestSFTPContractSeek extends AbstractContractSeekTest { + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new SFTPContract(conf); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java new file mode 100644 index 0000000000000..eca26dea5b39b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/FtpTestServer.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ftp; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; + +import org.apache.ftpserver.FtpServer; +import org.apache.ftpserver.FtpServerFactory; +import org.apache.ftpserver.ftplet.Authority; +import org.apache.ftpserver.ftplet.FtpException; +import org.apache.ftpserver.ftplet.UserManager; +import org.apache.ftpserver.impl.DefaultFtpServer; +import org.apache.ftpserver.listener.Listener; +import org.apache.ftpserver.listener.ListenerFactory; +import org.apache.ftpserver.usermanager.PropertiesUserManagerFactory; +import org.apache.ftpserver.usermanager.impl.BaseUser; + +/** + * Helper class facilitating to manage a local ftp + * server for unit tests purposes only. 
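A usage sketch for this helper, mirroring the TestFTPFileSystem setup further down: start the embedded server on an ephemeral port, register a user, and stop the server in teardown.

  FtpTestServer server =
      new FtpTestServer(GenericTestUtils.getTestDir().toPath()).start();
  BaseUser user = server.addUser("test", "password", new WritePermission());
  // ... point an FTPFileSystem at localhost:server.getPort() and run the test ...
  server.stop();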
+ */ +public class FtpTestServer { + + private int port; + private Path ftpRoot; + private UserManager userManager; + private FtpServer server; + + public FtpTestServer(Path ftpRoot) { + this.ftpRoot = ftpRoot; + this.userManager = new PropertiesUserManagerFactory().createUserManager(); + FtpServerFactory serverFactory = createServerFactory(); + serverFactory.setUserManager(userManager); + this.server = serverFactory.createServer(); + } + + public FtpTestServer start() throws Exception { + server.start(); + Listener listener = ((DefaultFtpServer) server) + .getListeners() + .get("default"); + port = listener.getPort(); + return this; + } + + public Path getFtpRoot() { + return ftpRoot; + } + + public int getPort() { + return port; + } + + public void stop() { + if (!server.isStopped()) { + server.stop(); + } + } + + public BaseUser addUser(String name, String password, + Authority... authorities) throws IOException, FtpException { + + BaseUser user = new BaseUser(); + user.setName(name); + user.setPassword(password); + Path userHome = Files.createDirectory(ftpRoot.resolve(name)); + user.setHomeDirectory(userHome.toString()); + user.setAuthorities(Arrays.asList(authorities)); + userManager.save(user); + return user; + } + + private FtpServerFactory createServerFactory() { + FtpServerFactory serverFactory = new FtpServerFactory(); + ListenerFactory defaultListener = new ListenerFactory(); + defaultListener.setPort(0); + serverFactory.addListener("default", defaultListener.createListener()); + return serverFactory; + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java index 3d41ccb91d6c4..b25990f18e9a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java @@ -17,18 +17,35 @@ */ package org.apache.hadoop.fs.ftp; -import com.google.common.base.Preconditions; -import org.apache.commons.net.ftp.FTP; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.Comparator; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.commons.net.ftp.FTP; import org.apache.commons.net.ftp.FTPClient; import org.apache.commons.net.ftp.FTPFile; +import org.apache.ftpserver.usermanager.impl.BaseUser; +import org.apache.ftpserver.usermanager.impl.WritePermission; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.After; +import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; - +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; /** @@ -37,9 +54,72 @@ */ public class TestFTPFileSystem { + private FtpTestServer server; + @Rule public Timeout testTimeout = new Timeout(180000); + @Before + public void setUp() throws Exception { + server = new 
FtpTestServer(GenericTestUtils.getTestDir().toPath()).start(); + } + + @After + @SuppressWarnings("ResultOfMethodCallIgnored") + public void tearDown() throws Exception { + if (server != null) { + server.stop(); + Files.walk(server.getFtpRoot()) + .sorted(Comparator.reverseOrder()) + .map(java.nio.file.Path::toFile) + .forEach(File::delete); + } + } + + @Test + public void testCreateWithWritePermissions() throws Exception { + BaseUser user = server.addUser("test", "password", new WritePermission()); + Configuration configuration = new Configuration(); + configuration.set("fs.defaultFS", "ftp:///"); + configuration.set("fs.ftp.host", "localhost"); + configuration.setInt("fs.ftp.host.port", server.getPort()); + configuration.set("fs.ftp.user.localhost", user.getName()); + configuration.set("fs.ftp.password.localhost", user.getPassword()); + configuration.setBoolean("fs.ftp.impl.disable.cache", true); + + FileSystem fs = FileSystem.get(configuration); + byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8); + try (FSDataOutputStream outputStream = fs.create(new Path("test1.txt"))) { + outputStream.write(bytesExpected); + } + try (FSDataInputStream input = fs.open(new Path("test1.txt"))) { + assertThat(bytesExpected, equalTo(IOUtils.readFullyToByteArray(input))); + } + } + + @Test + public void testCreateWithoutWritePermissions() throws Exception { + BaseUser user = server.addUser("test", "password"); + Configuration configuration = new Configuration(); + configuration.set("fs.defaultFS", "ftp:///"); + configuration.set("fs.ftp.host", "localhost"); + configuration.setInt("fs.ftp.host.port", server.getPort()); + configuration.set("fs.ftp.user.localhost", user.getName()); + configuration.set("fs.ftp.password.localhost", user.getPassword()); + configuration.setBoolean("fs.ftp.impl.disable.cache", true); + + FileSystem fs = FileSystem.get(configuration); + byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8); + LambdaTestUtils.intercept( + IOException.class, "Unable to create file: test1.txt, Aborting", + () -> { + try (FSDataOutputStream out = fs.create(new Path("test1.txt"))) { + out.write(bytesExpected); + } + } + ); + } + @Test public void testFTPDefaultPort() throws Exception { FTPFileSystem ftp = new FTPFileSystem(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index 4b3bd2f94075c..1ccc3400788d1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -47,7 +47,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java index 693926242c95d..58452f86f5999 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java @@ -374,4 +374,15 @@ public void 
testMkDirs() throws IOException { assertThat(((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount(), is(1)); } + + @Test + public void testCloseFileSystemClosesConnectionPool() throws Exception { + SFTPFileSystem fs = (SFTPFileSystem) sftpFs; + fs.getHomeDirectory(); + assertThat(fs.getConnectionPool().getLiveConnCount(), is(1)); + fs.close(); + assertThat(fs.getConnectionPool().getLiveConnCount(), is(0)); + ///making sure that re-entrant close calls are safe + fs.close(); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestFsShellConcat.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestFsShellConcat.java new file mode 100644 index 0000000000000..a2c4d3a1972c7 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestFsShellConcat.java @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.shell; + +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.net.URI; +import java.util.Random; + +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.assertj.core.api.Assertions; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.mockito.ArgumentMatchers.any; +import static org.junit.Assert.assertEquals; + +/** + * Test Concat. 
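The test below drives the -concat subcommand through FsShell; a minimal sketch of the equivalent programmatic call, with illustrative paths and an already existing destination file:

  FsShell shell = new FsShell(new Configuration());
  // concatenate the source files into dstFile; exit code 0 signals success
  int exitCode = shell.run(new String[] {
      "-concat", "/user/alice/dstFile", "/user/alice/file-00", "/user/alice/file-01"});
  assertEquals(0, exitCode);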
+ */ +public class TestFsShellConcat extends AbstractHadoopTestBase { + + private static Configuration conf; + private static FsShell shell; + private static LocalFileSystem lfs; + private static Path testRootDir; + private static Path dstPath; + + @Before + public void before() throws IOException { + conf = new Configuration(); + shell = new FsShell(conf); + lfs = FileSystem.getLocal(conf); + testRootDir = lfs.makeQualified(new Path(GenericTestUtils.getTempPath( + "testFsShellCopy"))); + + lfs.delete(testRootDir, true); + lfs.mkdirs(testRootDir); + lfs.setWorkingDirectory(testRootDir); + dstPath = new Path(testRootDir, "dstFile"); + lfs.create(dstPath).close(); + + Random random = new Random(); + for (int i = 0; i < 10; i++) { + OutputStream out = + lfs.create(new Path(testRootDir, String.format("file-%02d", i))); + out.write(random.nextInt()); + out.close(); + } + } + + @Test + public void testConcat() throws Exception { + // Read concatenated files to build the expected file content. + ByteArrayOutputStream out = new ByteArrayOutputStream(); + for (int i = 0; i < 10; i++) { + try (InputStream in = lfs + .open(new Path(testRootDir, String.format("file-%02d", i)))) { + IOUtils.copyBytes(in, out, 1024); + } + } + byte[] expectContent = out.toByteArray(); + + // Do concat. + FileSystem mockFs = Mockito.mock(FileSystem.class); + Mockito.doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + Path target = (Path)args[0]; + Path[] src = (Path[]) args[1]; + mockConcat(target, src); + return null; + }).when(mockFs).concat(any(Path.class), any(Path[].class)); + Concat.setTestFs(mockFs); + shellRun(0, "-concat", dstPath.toString(), testRootDir+"/file-*"); + + // Verify concat result. + ContractTestUtils + .assertPathExists(lfs, "The target file doesn't exist.", dstPath); + Assertions.assertThat(lfs.listStatus(testRootDir).length).isEqualTo(1); + assertEquals(expectContent.length, lfs.getFileStatus(dstPath).getLen()); + out = new ByteArrayOutputStream(); + try (InputStream in = lfs.open(dstPath)) { + IOUtils.copyBytes(in, out, 1024); + } + // Verify content. + byte[] concatedContent = out.toByteArray(); + assertEquals(expectContent.length, concatedContent.length); + ContractTestUtils.compareByteArrays(expectContent, concatedContent, + expectContent.length); + } + + @Test + public void testUnsupportedFs() throws Exception { + FileSystem mockFs = Mockito.mock(FileSystem.class); + Mockito.doThrow( + new UnsupportedOperationException("Mock unsupported exception.")) + .when(mockFs).concat(any(Path.class), any(Path[].class)); + Mockito.doAnswer(invocationOnMock -> new URI("mockfs:///")).when(mockFs) + .getUri(); + Concat.setTestFs(mockFs); + final ByteArrayOutputStream err = new ByteArrayOutputStream(); + PrintStream oldErr = System.err; + System.setErr(new PrintStream(err)); + try { + shellRun(1, "-concat", dstPath.toString(), testRootDir + "/file-*"); + } finally { + System.setErr(oldErr); + } + System.err.print(err.toString()); + String expectedErrMsg = "Dest filesystem 'mockfs' doesn't support concat"; + Assertions.assertThat(err.toString().contains(expectedErrMsg)) + .withFailMessage("The err message should contain \"" + expectedErrMsg + + "\" message.").isTrue(); + } + + private void shellRun(int n, String... args) { + assertEquals(n, shell.run(args)); + } + + /** + * Simple simulation of concat. 
+ */ + private void mockConcat(Path target, Path[] srcArray) throws IOException { + Path tmp = new Path(target.getParent(), target.getName() + ".bak"); + lfs.rename(target, tmp); + try (OutputStream out = lfs.create(target)) { + try (InputStream in = lfs.open(tmp)) { + IOUtils.copyBytes(in, out, 1024); + } + lfs.delete(tmp, true); + for (int i = 0; i < srcArray.length; i++) { + try (InputStream iin = lfs.open(srcArray[i])) { + IOUtils.copyBytes(iin, out, 1024); + } + lfs.delete(srcArray[i], true); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java index 1f379448ee86c..b9e87d3dacefe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestMove.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.PathExistsException; +import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -93,6 +94,12 @@ public void testMoveTargetExistsWithoutExplicitRename() throws Exception { assertTrue("Rename should have failed with path exists exception", cmd.error instanceof PathExistsException); } + + @Test(expected = UnknownOptionException.class) + public void testMoveFromLocalDoesNotAllowTOption() { + new MoveCommands.MoveFromLocal().run("-t", "2", + null, null); + } static class MockFileSystem extends FilterFileSystem { Configuration conf; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/IOStatisticAssertions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/IOStatisticAssertions.java new file mode 100644 index 0000000000000..22f6c33d2e260 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/IOStatisticAssertions.java @@ -0,0 +1,528 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.statistics; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import org.assertj.core.api.AbstractLongAssert; +import org.assertj.core.api.ObjectAssert; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Assertions and any other support for IOStatistics testing. + * If used downstream: know it is unstable. + */ + +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class IOStatisticAssertions { + + private static final String COUNTER = "Counter"; + + private static final String GAUGE = "Gauge"; + + private static final String MINIMUM = "Minimum"; + + private static final String MAXIMUM = "Maxiumum"; + + private static final String MEAN = "Mean"; + + private IOStatisticAssertions() { + } + + /** + * Get a required counter statistic. + * @param stats statistics source + * @param key statistic key + * @return the value + */ + public static long lookupCounterStatistic( + final IOStatistics stats, + final String key) { + return lookupStatistic(COUNTER, key, + verifyStatisticsNotNull(stats).counters()); + } + + /** + * Given an IOStatistics instance, verify it is not null, + * and return the value for continued use in a test. + * @param stats statistics source. + * @param type of statistics + * @return the value passed in. + */ + public static T + verifyStatisticsNotNull(final T stats) { + assertThat(stats) + .describedAs("IO Statistics reference") + .isNotNull(); + return stats; + } + + /** + * Get a required gauge statistic. + * @param stats statistics source + * @param key statistic key + * @return the value + */ + public static long lookupGaugeStatistic( + final IOStatistics stats, + final String key) { + return lookupStatistic(GAUGE, key, + verifyStatisticsNotNull(stats).gauges()); + } + + /** + * Get a required maximum statistic. + * @param stats statistics source + * @param key statistic key + * @return the value + */ + public static long lookupMaximumStatistic( + final IOStatistics stats, + final String key) { + return lookupStatistic(MAXIMUM, key, + verifyStatisticsNotNull(stats).maximums()); + } + + /** + * Get a required minimum statistic. + * @param stats statistics source + * @param key statistic key + * @return the value + */ + public static long lookupMinimumStatistic( + final IOStatistics stats, + final String key) { + return lookupStatistic(MINIMUM, key, + verifyStatisticsNotNull(stats).minimums()); + } + + /** + * Get a required mean statistic. + * @param stats statistics source + * @param key statistic key + * @return the value + */ + public static MeanStatistic lookupMeanStatistic( + final IOStatistics stats, + final String key) { + return lookupStatistic(MEAN, key, + verifyStatisticsNotNull(stats).meanStatistics()); + } + + /** + * Get a required counter statistic. 
+ * @param type of map element + * @param type type for error text + * @param key statistic key + * @param map map to probe + * @return the value + */ + private static E lookupStatistic( + final String type, + final String key, + final Map map) { + final E statistic = map.get(key); + assertThat(statistic) + .describedAs("%s named %s", type, key) + .isNotNull(); + return statistic; + } + + /** + * Assert that a counter has an expected value. + * @param stats statistics source + * @param key statistic key + * @param value expected value. + * @return the value (which always equals the expected value) + */ + public static long verifyStatisticCounterValue( + final IOStatistics stats, + final String key, + final long value) { + return verifyStatisticValue(COUNTER, key, + verifyStatisticsNotNull(stats).counters(), value); + } + + /** + * Assert that a gauge has an expected value. + * @param stats statistics source + * @param key statistic key + * @param value expected value. + * @return the value (which always equals the expected value) + */ + public static long verifyStatisticGaugeValue( + final IOStatistics stats, + final String key, + final long value) { + return verifyStatisticValue(GAUGE, key, + verifyStatisticsNotNull(stats).gauges(), value); + } + + /** + * Assert that a maximum has an expected value. + * @param stats statistics source + * @param key statistic key + * @param value expected value. + * @return the value (which always equals the expected value) + */ + public static long verifyStatisticMaximumValue( + final IOStatistics stats, + final String key, + final long value) { + return verifyStatisticValue(MAXIMUM, key, + verifyStatisticsNotNull(stats).maximums(), value); + } + + /** + * Assert that a minimum has an expected value. + * @param stats statistics source + * @param key statistic key + * @param value expected value. + * @return the value (which always equals the expected value) + */ + public static long verifyStatisticMinimumValue( + final IOStatistics stats, + final String key, + final long value) { + return verifyStatisticValue(MINIMUM, key, + verifyStatisticsNotNull(stats).minimums(), value); + } + + /** + * Assert that a mean has an expected value. + * @param stats statistics source + * @param key statistic key + * @param value expected value. + * @return the value (which always equals the expected value) + */ + public static MeanStatistic verifyStatisticMeanValue( + final IOStatistics stats, + final String key, + final MeanStatistic value) { + return verifyStatisticValue(MEAN, key, + verifyStatisticsNotNull(stats).meanStatistics(), value); + } + + /** + * Assert that a given statistic has an expected value. + * @param type type for error text + * @param key statistic key + * @param map map to look up + * @param value expected value. + * @param type of map element + * @return the value (which always equals the expected value) + */ + private static E verifyStatisticValue( + final String type, + final String key, + final Map map, + final E value) { + final E statistic = lookupStatistic(type, key, map); + assertThat(statistic) + .describedAs("%s named %s with expected value %s", type, + key, value) + .isEqualTo(value); + return statistic; + } + + + /** + * Assert that a given statistic has an expected value. 
+ * @param type of map element + * @param type type for error text + * @param key statistic key + * @param map map to look up + * @return an ongoing assertion + */ + private static ObjectAssert assertThatStatistic( + final String type, + final String key, + final Map map) { + final E statistic = lookupStatistic(type, key, map); + return assertThat(statistic) + .describedAs("%s named %s", type, key); + } + + /** + * Assert that a given statistic has an expected value. + * @param type of map element + * @param type type for error text + * @param key statistic key + * @param map map to look up + * @return an ongoing assertion + */ + private static AbstractLongAssert assertThatStatisticLong( + final String type, + final String key, + final Map map) { + final long statistic = lookupStatistic(type, key, map); + return assertThat(statistic) + .describedAs("%s named %s", type, key); + } + + /** + * Start an assertion chain on + * a required counter statistic. + * @param stats statistics source + * @param key statistic key + * @return an ongoing assertion + */ + public static AbstractLongAssert assertThatStatisticCounter( + final IOStatistics stats, + final String key) { + return assertThatStatisticLong(COUNTER, key, + verifyStatisticsNotNull(stats).counters()); + } + + /** + * Start an assertion chain on + * a required gauge statistic. + * @param stats statistics source + * @param key statistic key + * @return an ongoing assertion + */ + public static AbstractLongAssert assertThatStatisticGauge( + final IOStatistics stats, + final String key) { + return assertThatStatisticLong(GAUGE, key, + verifyStatisticsNotNull(stats).gauges()); + } + + /** + * Start an assertion chain on + * a required minimum statistic. + * @param stats statistics source + * @param key statistic key + * @return an ongoing assertion + */ + public static AbstractLongAssert assertThatStatisticMinimum( + final IOStatistics stats, + final String key) { + return assertThatStatisticLong(MINIMUM, key, + verifyStatisticsNotNull(stats).minimums()); + } + + /** + * Start an assertion chain on + * a required maximum statistic. + * @param stats statistics source + * @param key statistic key + * @return an ongoing assertion + */ + public static AbstractLongAssert assertThatStatisticMaximum( + final IOStatistics stats, + final String key) { + return assertThatStatisticLong(MAXIMUM, key, + verifyStatisticsNotNull(stats).maximums()); + } + + /** + * Start an assertion chain on + * a required mean statistic. + * @param stats statistics source + * @param key statistic key + * @return an ongoing assertion + */ + public static ObjectAssert assertThatStatisticMean( + final IOStatistics stats, + final String key) { + return assertThatStatistic(MEAN, key, + verifyStatisticsNotNull(stats).meanStatistics()); + } + + /** + * Start an assertion chain on + * a required mean statistic with the initial validation on the + * sample count and sum. + * @param stats statistics source + * @param key statistic key + * @return an ongoing assertion + */ + public static ObjectAssert assertThatStatisticMeanMatches( + final IOStatistics stats, + final String key, + final long samples, + final long sum) { + return assertThatStatisticMean(stats, key) + .matches(p -> (p.getSamples() == samples), + "samples == " + samples) + .matches(p -> (p.getSum() == sum), + "sum == " + sum); + } + + /** + * Assert that a given counter statistic is untracked. 
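The helpers above support both a direct equality check and an AssertJ chain; a sketch against any IOStatisticsSource, assuming the statistic names are statically imported as in the tests elsewhere in this patch and that out is some instrumented output stream:

  IOStatistics stats = extractStatistics(out);
  verifyStatisticCounterValue(stats, STREAM_WRITE_BYTES, 1);
  assertThatStatisticCounter(stats, STREAM_WRITE_BYTES)
      .isGreaterThanOrEqualTo(1);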
+ * @param stats statistics source + * @param type type for error text + * @param key statistic key + * @param map map to probe + */ + private static void assertUntracked(final IOStatistics stats, + final String type, + final String key, + final Map map) { + assertThat(map.containsKey(key)) + .describedAs("%s %s is tracked in %s", type, key, stats) + .isFalse(); + } + + /** + * Assert that a given counter statistic is untracked. + * @param stats statistics source + * @param type type for error text + * @param key statistic key + * @param map map to probe + */ + private static void assertTracked(final IOStatistics stats, + final String type, + final String key, + final Map map) { + assertThat(map.containsKey(key)) + .describedAs("%s %s is not tracked in %s", type, key, stats) + .isTrue(); + } + + /** + * Assert that a given statistic is tracked. + * @param stats statistics source + * @param key statistic key + */ + public static void assertStatisticCounterIsTracked( + final IOStatistics stats, + final String key) { + assertTracked(stats, COUNTER, key, + verifyStatisticsNotNull(stats).counters()); + } + + /** + * Assert that a given counter statistic is untracked. + * @param stats statistics source + * @param key statistic key + */ + public static void assertStatisticCounterIsUntracked( + final IOStatistics stats, + final String key) { + assertUntracked(stats, COUNTER, key, + verifyStatisticsNotNull(stats).counters()); + } + + /** + * Assert that an object is a statistics source and that the + * statistics is not null. + * @param source source object. + */ + public static void assertIsStatisticsSource(Object source) { + assertThat(source) + .describedAs("Object %s", source) + .isInstanceOf(IOStatisticsSource.class) + .extracting(o -> ((IOStatisticsSource) o).getIOStatistics()) + .isNotNull(); + } + + /** + * Query the source for the statistics; fails if the statistics + * returned are null or the class does not implement the API. + * @param source source object. + * @return the statistics it provides. + */ + public static IOStatistics extractStatistics(Object source) { + assertThat(source) + .describedAs("Object %s", source) + .isInstanceOf(IOStatisticsSource.class); + IOStatisticsSource ios = (IOStatisticsSource) source; + return extractStatistics(ios); + } + + /** + * Get the non-null statistics. + * @param ioStatisticsSource source + * @return the statistics, guaranteed to be non null + */ + private static IOStatistics extractStatistics( + final IOStatisticsSource ioStatisticsSource) { + IOStatistics statistics = ioStatisticsSource.getIOStatistics(); + assertThat(statistics) + .describedAs("Statistics from %s", ioStatisticsSource) + .isNotNull(); + return statistics; + } + + /** + * Perform a serialization round trip on a statistics instance. + * @param stat statistic + * @return the deserialized version. 
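A sketch of how the serialization round trip declared just below might be exercised: snapshot the live statistics, push them through Java serialization, and assert on the restored values. Here stream and expectedBytes are assumed to come from the calling test.

  IOStatisticsSnapshot snapshot = snapshotIOStatistics();
  snapshot.aggregate(extractStatistics(stream));
  IOStatistics restored = statisticsJavaRoundTrip(snapshot);
  verifyStatisticCounterValue(restored, STREAM_READ_BYTES, expectedBytes);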
+ */ + public static IOStatistics statisticsJavaRoundTrip(final IOStatistics stat) + throws IOException, ClassNotFoundException { + assertThat(stat).isInstanceOf(Serializable.class); + ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); + try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { + oos.writeObject(stat); + } + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + IOStatistics deser; + try (ObjectInputStream ois = new RestrictedInput(bais, + IOStatisticsSnapshot.requiredSerializationClasses())) { + deser = (IOStatistics) ois.readObject(); + } + return deser; + } + + private static final class RestrictedInput extends ObjectInputStream { + + private final List allowedClasses; + + private RestrictedInput(final InputStream in, + final List allowedClasses) throws IOException { + + super(in); + this.allowedClasses = allowedClasses.stream() + .map(Class::getName) + .collect(Collectors.toList()); + } + + @Override + protected Class resolveClass(final ObjectStreamClass desc) + throws IOException, ClassNotFoundException { + final String classname = desc.getName(); + if (!allowedClasses.contains(classname)) { + throw new ClassNotFoundException("Class " + classname + + " Not in list of allowed classes"); + } + + return super.resolveClass(desc); + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestDurationTracking.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestDurationTracking.java new file mode 100644 index 0000000000000..8258b62c1f759 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestDurationTracking.java @@ -0,0 +1,361 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.statistics; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.impl.FutureIOSupport; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.test.AbstractHadoopTestBase; +import org.apache.hadoop.util.functional.FunctionRaisingIOE; +import org.apache.hadoop.util.functional.FutureIO; + +import static org.apache.hadoop.fs.statistics.DurationStatisticSummary.fetchDurationSummary; +import static org.apache.hadoop.fs.statistics.DurationStatisticSummary.fetchSuccessSummary; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.*; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.*; +import static org.apache.hadoop.fs.statistics.impl.StubDurationTrackerFactory.STUB_DURATION_TRACKER_FACTORY; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Test the IOStatistic DurationTracker logic. + */ +public class TestDurationTracking extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestDurationTracking.class); + + private static final String REQUESTS = "requests"; + + public static final String UNKNOWN = "unknown"; + + private IOStatisticsStore stats; + + private final AtomicInteger invocationCounter = new AtomicInteger(0); + + @Before + public void setup() { + stats = iostatisticsStore() + .withDurationTracking(REQUESTS) + .build(); + } + + @After + public void teardown() { + LOG.info("stats {}", stats); + } + + /** + * Duration tracking. + */ + @Test + public void testDurationTryWithResources() throws Throwable { + DurationTracker tracker = + stats.trackDuration(REQUESTS); + verifyStatisticCounterValue(stats, REQUESTS, 1L); + sleep(); + tracker.close(); + try (DurationTracker ignored = + stats.trackDuration(REQUESTS)) { + sleep(); + } + LOG.info("Statistics: {}", stats); + DurationStatisticSummary summary = fetchSuccessSummary(stats, REQUESTS); + assertSummaryValues(summary, 2, 1, 1); + assertSummaryMean(summary, 2, 0); + } + + /** + * A little sleep method; exceptions are swallowed. + * Increments {@link #invocationCounter}. + * Increments {@inheritDoc #atomicCounter}. + */ + public void sleep() { + sleepf(10); + } + + /** + * A little sleep function; exceptions are swallowed. + * Increments {@link #invocationCounter}. + */ + protected int sleepf(final int millis) { + invocationCounter.incrementAndGet(); + try { + Thread.sleep(millis); + } catch (InterruptedException ignored) { + } + return millis; + } + + /** + * Assert that the sleep counter has been invoked + * the expected number of times. + * @param expected expected value + */ + private void assertCounterValue(final int expected) { + assertThat(invocationCounter.get()) + .describedAs("Sleep invocation Counter") + .isEqualTo(expected); + } + + /** + * Test that a function raising an IOE can be wrapped. 
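For context, a small sketch of the two duration-tracking idioms this test class exercises, as they might appear outside a test; the statistic key "op_example" and the wrapper class are illustrative assumptions, not part of the patch.

    package org.apache.hadoop.fs.statistics;

    import java.io.IOException;

    import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

    import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;
    import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration;

    public final class ExampleDurationTracking {

      private ExampleDurationTracking() {
      }

      public static long timedOperation() throws IOException {
        IOStatisticsStore store = iostatisticsStore()
            .withDurationTracking("op_example")   // hypothetical statistic key
            .build();

        // Idiom 1: wrap a callable; success/failure durations are recorded
        // when it returns or throws.
        long result = trackDuration(store, "op_example", () -> 42L);

        // Idiom 2: explicit try-with-resources around an arbitrary block.
        try (DurationTracker ignored = store.trackDuration("op_example")) {
          // timed work goes here
        }
        return result;
      }
    }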
+ */ + @Test + public void testDurationFunctionIOE() throws Throwable { + FunctionRaisingIOE fn = + trackFunctionDuration(stats, REQUESTS, + (Integer x) -> invocationCounter.getAndSet(x)); + assertThat(fn.apply(1)).isEqualTo(0); + assertCounterValue(1); + assertSummaryValues( + fetchSuccessSummary(stats, REQUESTS), + 1, 0, 0); + } + + /** + * Trigger a failure and verify its the failure statistics + * which go up. + */ + @Test + public void testDurationFunctionIOEFailure() throws Throwable { + FunctionRaisingIOE fn = + trackFunctionDuration(stats, REQUESTS, + (Integer x) -> { + sleep(); + return 100 / x; + }); + intercept(ArithmeticException.class, + () -> fn.apply(0)); + assertSummaryValues( + fetchSuccessSummary(stats, REQUESTS), + 1, -1, -1); + + DurationStatisticSummary failures = fetchDurationSummary(stats, REQUESTS, + false); + assertSummaryValues(failures, 1, 0, 0); + assertSummaryMean(failures, 1, 0); + } + + /** + * Trigger a failure and verify its the failure statistics + * which go up. + */ + @Test + public void testDurationJavaFunctionFailure() throws Throwable { + Function fn = + trackJavaFunctionDuration(stats, REQUESTS, + (Integer x) -> { + return 100 / x; + }); + intercept(ArithmeticException.class, + () -> fn.apply(0)); + assertSummaryValues( + fetchSuccessSummary(stats, REQUESTS), + 1, -1, -1); + + DurationStatisticSummary failures = fetchDurationSummary(stats, REQUESTS, + false); + assertSummaryValues(failures, 1, 0, 0); + } + + /** + * Test trackDurationOfCallable. + */ + @Test + public void testCallableDuration() throws Throwable { + // call the operation + assertThat( + trackDurationOfCallable(stats, REQUESTS, () -> sleepf(100)).call()) + .isEqualTo(100); + DurationStatisticSummary summary = fetchSuccessSummary(stats, REQUESTS); + assertSummaryValues(summary, 1, 0, 0); + assertSummaryMean(summary, 1, 0); + } + + /** + * Callable raising an RTE after a sleep; failure + * stats will be updated and the execution count will be + * 1. + */ + @Test + public void testCallableFailureDuration() throws Throwable { + + intercept(RuntimeException.class, + trackDurationOfCallable(stats, REQUESTS, () -> { + sleepf(100); + throw new RuntimeException("oops"); + })); + assertCounterValue(1); + assertSummaryValues( + fetchSuccessSummary(stats, REQUESTS), + 1, -1, -1); + + assertSummaryValues(fetchDurationSummary(stats, REQUESTS, false), + 1, 0, 0); + } + + /** + * Duration of the successful execution of a InvocationRaisingIOE. + */ + @Test + public void testInvocationDuration() throws Throwable { + // call the operation + trackDurationOfInvocation(stats, REQUESTS, () -> { + sleepf(100); + }); + assertCounterValue(1); + DurationStatisticSummary summary = fetchSuccessSummary(stats, REQUESTS); + assertSummaryValues(summary, 1, 0, 0); + assertSummaryMean(summary, 1, 0); + } + + /** + * Duration of the successful execution of a CallableRaisingIOE. + */ + @Test + public void testCallableIOEDuration() throws Throwable { + // call the operation + assertThat( + trackDuration(stats, REQUESTS, () -> sleepf(100))) + .isEqualTo(100); + DurationStatisticSummary summary = fetchSuccessSummary(stats, REQUESTS); + assertSummaryValues(summary, 1, 0, 0); + assertSummaryMean(summary, 1, 0); + } + + /** + * Track the duration of an IOE raising callable which fails. 
+ */ + @Test + public void testCallableIOEFailureDuration() throws Throwable { + intercept(IOException.class, + () -> + trackDuration(stats, REQUESTS, () -> { + sleepf(100); + throw new IOException("oops"); + })); + assertSummaryValues( + fetchSuccessSummary(stats, REQUESTS), + 1, -1, -1); + + assertSummaryValues(fetchDurationSummary(stats, REQUESTS, false), + 1, 0, 0); + } + + + /** + * Track the duration of an IOE raising callable which fails. + */ + @Test + public void testDurationThroughEval() throws Throwable { + CompletableFuture eval = FutureIOSupport.eval( + trackDurationOfOperation(stats, REQUESTS, () -> { + sleepf(100); + throw new FileNotFoundException("oops"); + })); + intercept(FileNotFoundException.class, "oops", () -> + FutureIO.awaitFuture(eval)); + assertSummaryValues(fetchDurationSummary(stats, REQUESTS, false), + 1, 0, 0); + } + + /** + * It's OK to track a duration against an unknown statistic. + */ + @Test + public void testUnknownDuration() throws Throwable { + trackDurationOfCallable(stats, UNKNOWN, () -> sleepf(1)).call(); + DurationStatisticSummary summary = fetchSuccessSummary(stats, UNKNOWN); + assertSummaryValues(summary, 0, -1, -1); + assertThat(summary.getMean()).isNull(); + } + + /** + * The stub duration tracker factory can be supplied as an input. + */ + @Test + public void testTrackDurationWithStubFactory() throws Throwable { + trackDuration(STUB_DURATION_TRACKER_FACTORY, UNKNOWN, () -> sleepf(1)); + } + + /** + * Make sure the tracker returned from the stub factory + * follows the basic lifecycle. + */ + @Test + public void testStubDurationLifecycle() throws Throwable { + DurationTracker tracker = STUB_DURATION_TRACKER_FACTORY + .trackDuration("k", 1); + tracker.failed(); + tracker.close(); + tracker.close(); + } + + /** + * Assert that a statistics summary has the specific values. + * @param summary summary data + * @param count count -must match exactly. + * @param minBase minimum value for the minimum field (inclusive) + * @param maxBase minimum value for the maximum field (inclusive) + */ + protected void assertSummaryValues( + final DurationStatisticSummary summary, + final int count, + final int minBase, + final int maxBase) { + assertThat(summary) + .matches(s -> s.getCount() == count, "Count value") + .matches(s -> s.getMax() >= maxBase, "Max value") + .matches(s -> s.getMin() >= minBase, "Min value"); + } + + /** + * Assert that at a summary has a matching mean value. + * @param summary summary data. + * @param expectedSampleCount sample count -which must match + * @param meanGreaterThan the mean must be greater than this value. 
+ */ + protected void assertSummaryMean( + final DurationStatisticSummary summary, + final int expectedSampleCount, + final double meanGreaterThan) { + String description = "mean of " + summary; + assertThat(summary.getMean()) + .describedAs(description) + .isNotNull(); + assertThat(summary.getMean().getSamples()) + .describedAs(description) + .isEqualTo(expectedSampleCount); + assertThat(summary.getMean().mean()) + .describedAs(description) + .isGreaterThan(meanGreaterThan); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestDynamicIOStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestDynamicIOStatistics.java new file mode 100644 index 0000000000000..9b929ac82ff11 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestDynamicIOStatistics.java @@ -0,0 +1,311 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import org.assertj.core.api.Assertions; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.statistics.impl.SourceWrappedStatistics; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertStatisticCounterIsTracked; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertStatisticCounterIsUntracked; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatisticsSource; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.ENTRY_PATTERN; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.NULL_SOURCE; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.dynamicIOStatistics; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.emptyStatistics; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * verify dynamic statistics are dynamic, except when you iterate through + * them, along with other tests of the class's behavior. 
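Ahead of the test class below, a brief sketch of how a production component might publish live counters through the same dynamicIOStatistics() builder the test sets up; the class and the "bytes_read" key are illustrative assumptions, not part of the patch.

    package org.apache.hadoop.fs.statistics;

    import java.util.concurrent.atomic.AtomicLong;

    import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.dynamicIOStatistics;

    public class ExampleDynamicSource implements IOStatisticsSource {

      private final AtomicLong bytesRead = new AtomicLong();

      // the returned view evaluates the counter on every lookup,
      // so callers always observe the live value
      private final IOStatistics statistics = dynamicIOStatistics()
          .withAtomicLongCounter("bytes_read", bytesRead)
          .build();

      public void recordRead(long length) {
        bytesRead.addAndGet(length);
      }

      @Override
      public IOStatistics getIOStatistics() {
        return statistics;
      }
    }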
+ */ +public class TestDynamicIOStatistics extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestDynamicIOStatistics.class); + + private static final String ALONG = "along"; + + private static final String AINT = "aint"; + + private static final String COUNT = "count"; + + private static final String EVAL = "eval"; + + /** + * The statistics. + */ + private IOStatistics statistics = emptyStatistics(); + + /** + * A source of these statistics. + */ + private IOStatisticsSource statsSource; + + private final AtomicLong aLong = new AtomicLong(); + + private final AtomicInteger aInt = new AtomicInteger(); + + private final MutableCounterLong counter = new MutableCounterLong( + new Info("counter"), 0); + + private long evalLong; + + private static final String[] KEYS = new String[]{ALONG, AINT, COUNT, EVAL}; + + @Before + public void setUp() throws Exception { + statistics = dynamicIOStatistics() + .withAtomicLongCounter(ALONG, aLong) + .withAtomicIntegerCounter(AINT, aInt) + .withMutableCounter(COUNT, counter) + .withLongFunctionCounter(EVAL, x -> evalLong) + .build(); + statsSource = new SourceWrappedStatistics(statistics); + } + + /** + * The eval operation is foundational. + */ + @Test + public void testEval() throws Throwable { + verifyStatisticCounterValue(statistics, EVAL, 0); + evalLong = 10; + verifyStatisticCounterValue(statistics, EVAL, 10); + } + + /** + * Atomic Long statistic. + */ + @Test + public void testAlong() throws Throwable { + verifyStatisticCounterValue(statistics, ALONG, 0); + aLong.addAndGet(1); + verifyStatisticCounterValue(statistics, ALONG, 1); + } + + /** + * Atomic Int statistic. + */ + @Test + public void testAint() throws Throwable { + verifyStatisticCounterValue(statistics, AINT, 0); + aInt.addAndGet(1); + verifyStatisticCounterValue(statistics, AINT, 1); + } + + /** + * Metrics2 counter. + */ + @Test + public void testCounter() throws Throwable { + verifyStatisticCounterValue(statistics, COUNT, 0); + counter.incr(); + verifyStatisticCounterValue(statistics, COUNT, 1); + } + + /** + * keys() returns all the keys. + */ + @Test + public void testKeys() throws Throwable { + Assertions.assertThat(statistics.counters().keySet()) + .describedAs("statistic keys of %s", statistics) + .containsExactlyInAnyOrder(KEYS); + } + + @Test + public void testIteratorHasAllKeys() throws Throwable { + // go through the statistics iterator and assert that it contains exactly + // the values. + assertThat(statistics.counters().keySet()) + .containsExactlyInAnyOrder(KEYS); + } + + /** + * Verify that the iterator is taken from + * a snapshot of the values. + */ + @Test + public void testIteratorIsSnapshot() throws Throwable { + // set the counters all to 1 + incrementAllCounters(); + // take the snapshot + final Iterator> it = + statistics.counters().entrySet().iterator(); + // increment the counters + incrementAllCounters(); + // now assert that all the iterator values are of value 1 + while (it.hasNext()) { + Map.Entry next = it.next(); + assertThat(next.getValue()) + .describedAs("Value of entry %s", next) + .isEqualTo(1); + } + } + + @Test + public void testUnknownStatistic() throws Throwable { + assertStatisticCounterIsUntracked(statistics, "anything"); + } + + @Test + public void testStatisticsTrackedAssertion() throws Throwable { + // expect an exception to be raised when an assertion + // is made that an unknown statistic is tracked,. 
+ assertThatThrownBy(() -> + assertStatisticCounterIsTracked(statistics, "anything")) + .isInstanceOf(AssertionError.class); + } + + @Test + public void testStatisticsValueAssertion() throws Throwable { + // expect an exception to be raised when + // an assertion is made about the value of an unknown statistics + assertThatThrownBy(() -> + verifyStatisticCounterValue(statistics, "anything", 0)) + .isInstanceOf(AssertionError.class); + } + + /** + * Serialization round trip will preserve all the values. + */ + @Test + public void testSerDeser() throws Throwable { + incrementAllCounters(); + IOStatistics stat = IOStatisticsSupport.snapshotIOStatistics(statistics); + incrementAllCounters(); + IOStatistics deser = IOStatisticAssertions.statisticsJavaRoundTrip(stat); + assertThat(deser.counters().keySet()) + .containsExactlyInAnyOrder(KEYS); + for (Map.Entry e : deser.counters().entrySet()) { + assertThat(e.getValue()) + .describedAs("Value of entry %s", e) + .isEqualTo(1); + } + } + + @Test + public void testStringification() throws Throwable { + assertThat(ioStatisticsToString(statistics)) + .isNotBlank() + .contains(KEYS); + } + + @Test + public void testDemandStringification() throws Throwable { + String counterPattern = ENTRY_PATTERN; + // this is not yet evaluated + Object demand = demandStringifyIOStatistics(statistics); + // nor is this. + Object demandSource = demandStringifyIOStatisticsSource(statsSource); + + // show it evaluates + String formatted1 = String.format(counterPattern, ALONG, aLong.get()); + assertThat(demand + .toString()) + .contains(formatted1); + assertThat(demandSource + .toString()) + .contains(formatted1); + + // when the counters are incremented + incrementAllCounters(); + incrementAllCounters(); + // there are new values to expect + String formatted2 = String.format(counterPattern, ALONG, aLong.get()); + assertThat(demand + .toString()) + .doesNotContain(formatted1) + .contains(formatted2); + assertThat(demandSource + .toString()) + .doesNotContain(formatted1) + .contains(formatted2); + } + + @Test + public void testNullSourceStringification() throws Throwable { + assertThat(demandStringifyIOStatisticsSource((IOStatisticsSource) null) + .toString()) + .isEqualTo(NULL_SOURCE); + } + + @Test + public void testNullStatStringification() throws Throwable { + assertThat(demandStringifyIOStatistics((IOStatistics) null) + .toString()) + .isEqualTo(NULL_SOURCE); + } + + @Test + public void testStringLogging() throws Throwable { + LOG.info("Output {}", demandStringifyIOStatistics(statistics)); + } + + /** + * Increment all the counters from their current value. + */ + private void incrementAllCounters() { + aLong.incrementAndGet(); + aInt.incrementAndGet(); + evalLong += 1; + counter.incr(); + } + + /** + * Needed to provide a metrics info instance for the counter + * constructor. 
+ */ + private static final class Info implements MetricsInfo { + + private final String name; + + private Info(final String name) { + this.name = name; + } + + @Override + public String name() { + return name; + } + + @Override + public String description() { + return name; + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestEmptyIOStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestEmptyIOStatistics.java new file mode 100644 index 0000000000000..296470abaa9bf --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestEmptyIOStatistics.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.junit.Test; + +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertStatisticCounterIsTracked; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertStatisticCounterIsUntracked; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.emptyStatistics; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * Test handling of the empty IO statistics class. + */ +public class TestEmptyIOStatistics extends AbstractHadoopTestBase { + + private final IOStatistics empty = emptyStatistics(); + + @Test + public void testUnknownStatistic() throws Throwable { + assertStatisticCounterIsUntracked(empty, "anything"); + } + + @Test + public void testStatisticsTrackedAssertion() throws Throwable { + // expect an exception to be raised when an assertion + // is made that an unknown statistic is tracked,. 
+ assertThatThrownBy(() -> + assertStatisticCounterIsTracked(empty, "anything")) + .isInstanceOf(AssertionError.class); + } + + @Test + public void testStatisticsValueAssertion() throws Throwable { + // expect an exception to be raised when + // an assertion is made about the value of an unknown statistics + assertThatThrownBy(() -> + verifyStatisticCounterValue(empty, "anything", 0)) + .isInstanceOf(AssertionError.class); + } + + @Test + public void testEmptySnapshot() throws Throwable { + final IOStatistics stat = IOStatisticsSupport.snapshotIOStatistics(empty); + assertThat(stat.counters().keySet()) + .describedAs("keys of snapshot") + .isEmpty(); + IOStatistics deser = IOStatisticAssertions.statisticsJavaRoundTrip(stat); + assertThat(deser.counters().keySet()) + .describedAs("keys of deserialized snapshot") + .isEmpty(); + } + + @Test + public void testStringification() throws Throwable { + assertThat(ioStatisticsToString(empty)) + .isNotBlank(); + } + + @Test + public void testWrap() throws Throwable { + IOStatisticsSource statisticsSource = IOStatisticsBinding.wrap(empty); + assertThat(statisticsSource.getIOStatistics()) + .isSameAs(empty); + } + + @Test + public void testStringifyNullSource() throws Throwable { + assertThat(IOStatisticsLogging.ioStatisticsSourceToString(null)) + .isEmpty(); + } + + @Test + public void testStringifyNullStats() throws Throwable { + assertThat( + IOStatisticsLogging.ioStatisticsSourceToString( + IOStatisticsBinding.wrap(null))) + .isEmpty(); + } + + @Test + public void testStringificationNull() throws Throwable { + assertThat(ioStatisticsToString(null)) + .describedAs("Null statistics should stringify to \"\"") + .isEmpty(); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsSnapshot.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsSnapshot.java new file mode 100644 index 0000000000000..41e9bffefe834 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsSnapshot.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.statistics; + +import org.assertj.core.api.Assertions; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import org.apache.hadoop.test.AbstractHadoopTestBase; +import org.apache.hadoop.util.JsonSerialization; + +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.*; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * Test handling of the {@link IOStatisticsSnapshot} class. + */ +public class TestIOStatisticsSnapshot extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestIOStatisticsSnapshot.class); + + /** + * Simple snapshot built up in test setup. + */ + private final IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot(); + + /** Saved to the snapshot as "mean01". */ + private MeanStatistic mean0; + + /** Saved to the snapshot as "mean1". */ + private MeanStatistic mean1; + + @Before + public void setup() throws Exception { + snapshot.counters().put("c1", 0L); + snapshot.gauges().put("g1", 1L); + snapshot.minimums().put("m1", -1L); + mean1 = new MeanStatistic(1, 1); + snapshot.meanStatistics().put("mean1", + mean1); + mean0 = new MeanStatistic(0, 1); + snapshot.meanStatistics().put("mean0", + mean0); + } + + @Test + public void testTrackedValues() throws Throwable { + verifyStatisticCounterValue(snapshot, "c1", 0L); + verifyStatisticGaugeValue(snapshot, "g1", 1L); + verifyStatisticMinimumValue(snapshot, "m1", -1L); + verifyStatisticMeanValue(snapshot, "mean0", + new MeanStatistic(0, 1)); + } + + @Test + public void testStatisticsValueAssertion() throws Throwable { + // expect an exception to be raised when + // an assertion is made about the value of an unknown statistics + assertThatThrownBy(() -> + verifyStatisticCounterValue(snapshot, "anything", 0)) + .isInstanceOf(AssertionError.class); + } + + @Test + public void testStringification() throws Throwable { + assertThat(ioStatisticsToString(snapshot)) + .isNotBlank(); + } + + @Test + public void testStringification2() throws Throwable { + + String ss = snapshot.toString(); + LOG.info("original {}", ss); + Assertions.assertThat(ss) + .describedAs("snapshot toString()") + .contains("c1=0") + .contains("g1=1"); + } + + @Test + public void testWrap() throws Throwable { + IOStatisticsSource statisticsSource = IOStatisticsBinding.wrap(snapshot); + assertThat(statisticsSource.getIOStatistics()) + .isSameAs(snapshot); + } + + @Test + public void testJsonRoundTrip() throws Throwable { + JsonSerialization serializer + = IOStatisticsSnapshot.serializer(); + + String json = serializer.toJson(snapshot); + LOG.info("serialized form\n{}", json); + IOStatisticsSnapshot deser = serializer.fromJson(json); + verifyDeserializedInstance(deser); + } + + /** + * Verify the deserialized instance's data + * matches the expected values. + * @param deser deserialized vlaue. 
+ */ + public void verifyDeserializedInstance( + final IOStatistics deser) { + LOG.info("deserialized {}", deser); + verifyStatisticCounterValue(deser, "c1", 0L); + verifyStatisticGaugeValue(deser, "g1", 1L); + verifyStatisticMinimumValue(deser, "m1", -1L); + verifyStatisticMeanValue(deser, "mean0", + new MeanStatistic(0, 1)); + verifyStatisticMeanValue(deser, "mean1", + snapshot.meanStatistics().get("mean1")); + } + + @Test + public void testJavaRoundTrip() throws Throwable { + verifyDeserializedInstance( + IOStatisticAssertions.statisticsJavaRoundTrip( + snapshot)); + + + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsStore.java new file mode 100644 index 0000000000000..778eab8315aa5 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestIOStatisticsStore.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.assertj.core.api.Assertions; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.test.AbstractHadoopTestBase; +import org.apache.hadoop.util.JsonSerialization; + +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticMeanMatches; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticGaugeValue; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticMaximumValue; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticMinimumValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * Test the IOStatisticStore implementation. 
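The snapshot tests above rely on IOStatisticsSnapshot.serializer() for the JSON round trip; a compact sketch of that pattern as it might be used outside a test follows, with the helper class name being an illustrative assumption.

    package org.apache.hadoop.fs.statistics;

    import java.io.IOException;

    import org.apache.hadoop.util.JsonSerialization;

    import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics;

    public final class ExampleSnapshotJson {

      private ExampleSnapshotJson() {
      }

      public static IOStatisticsSnapshot roundTrip(IOStatistics live)
          throws IOException {
        // freeze the current values into a serializable snapshot
        IOStatisticsSnapshot snapshot = snapshotIOStatistics(live);
        JsonSerialization<IOStatisticsSnapshot> serializer =
            IOStatisticsSnapshot.serializer();
        String json = serializer.toJson(snapshot);
        return serializer.fromJson(json);
      }
    }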
+ */ +public class TestIOStatisticsStore extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestIOStatisticsStore.class); + + + private static final String COUNT = "count"; + + private static final String GAUGE = "gauge"; + + private static final String MIN = "min"; + + private static final String MAX = "max"; + + private static final String MEAN = "mean"; + + public static final String UNKNOWN = "unknown"; + + private IOStatisticsStore stats; + + @Before + public void setup() { + stats = iostatisticsStore() + .withCounters(COUNT) + .withGauges(GAUGE) + .withMinimums(MIN) + .withMaximums(MAX) + .withMeanStatistics(MEAN) + .build(); + } + + @After + public void teardown() { + LOG.info("stats {}", stats); + } + + /** + * Gauges go up and down. + */ + @Test + public void testGauges() throws Throwable { + stats.setGauge(GAUGE, 1); + verifyStatisticGaugeValue(stats, GAUGE, 1); + stats.incrementGauge(GAUGE, 1); + verifyStatisticGaugeValue(stats, GAUGE, 2); + stats.setGauge(GAUGE, -1); + verifyStatisticGaugeValue(stats, GAUGE, -1); + Assertions.assertThat(stats.incrementGauge(GAUGE, -1)) + .isEqualTo(-2); + verifyStatisticGaugeValue(stats, GAUGE, -2); + Assertions.assertThat(stats.getGaugeReference(GAUGE).get()) + .isEqualTo(-2); + stats.setGauge(UNKNOWN, 1); + Assertions.assertThat(stats.incrementGauge(UNKNOWN, 1)) + .isEqualTo(0); + } + + @Test + public void testMinimums() throws Throwable { + stats.setMinimum(MIN, 100); + verifyStatisticMinimumValue(stats, MIN, 100); + stats.setMinimum(MIN, 100); + // will do nothing as it is higher + stats.addMinimumSample(MIN, 200); + verifyStatisticMinimumValue(stats, MIN, 100); + stats.addMinimumSample(MIN, 10); + verifyStatisticMinimumValue(stats, MIN, 10); + stats.setMinimum(UNKNOWN, 100); + stats.addMinimumSample(UNKNOWN, 200); + } + + @Test + public void testMaximums() throws Throwable { + stats.setMaximum(MAX, 100); + verifyStatisticMaximumValue(stats, MAX, 100); + stats.setMaximum(MAX, 100); + stats.addMaximumSample(MAX, 200); + verifyStatisticMaximumValue(stats, MAX, 200); + stats.addMaximumSample(MAX, 10); + verifyStatisticMaximumValue(stats, MAX, 200); + stats.setMaximum(UNKNOWN, 100); + stats.addMaximumSample(UNKNOWN, 200); + } + + @Test + public void testMeans() throws Throwable { + stats.setMeanStatistic(MEAN, + new MeanStatistic(1, 1)); + + assertThatStatisticMeanMatches(stats, MEAN, 1, 1) + .matches(p -> p.mean() == 1, "mean"); + stats.addMeanStatisticSample(MEAN, 9); + assertThatStatisticMeanMatches(stats, MEAN, 2, 10) + .matches(p -> p.mean() == 5, "mean"); + } + + @Test + public void testRoundTrip() throws Throwable { + JsonSerialization serializer + = IOStatisticsSnapshot.serializer(); + stats.incrementCounter(COUNT); + stats.setGauge(GAUGE, -1); + stats.addMaximumSample(MAX, 200); + stats.addMinimumSample(MIN, -100); + stats.addMeanStatisticSample(MEAN, 1); + stats.addMeanStatisticSample(MEAN, 9); + + String json = serializer.toJson(snapshotIOStatistics(stats)); + LOG.info("serialized form\n{}", json); + IOStatisticsSnapshot deser = serializer.fromJson(json); + LOG.info("deserialized {}", deser); + verifyStatisticCounterValue(deser, COUNT, 1L); + verifyStatisticGaugeValue(deser, GAUGE, -1); + verifyStatisticMaximumValue(deser, MAX, 200); + verifyStatisticMinimumValue(deser, MIN, -100); + assertThatStatisticMeanMatches(deser, MEAN, 2, 10) + .matches(p -> p.mean() == 5, "mean"); + + } + + @Test + public void testUnknownCounter() throws Throwable { + 
Assertions.assertThat(stats.incrementCounter("unknown", -10)) + .isEqualTo(0); + } + + @Test + public void testNegativeCounterIncrementIgnored() throws Throwable { + Assertions.assertThat(stats.incrementCounter(COUNT, 2)) + .isEqualTo(2); + Assertions.assertThat(stats.incrementCounter(COUNT, -10)) + .isEqualTo(2); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestMeanStatistic.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestMeanStatistic.java new file mode 100644 index 0000000000000..749a6ee4d9eb4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/statistics/TestMeanStatistic.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.statistics; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.test.AbstractHadoopTestBase; +import org.apache.hadoop.util.JsonSerialization; + +/** + * Test the {@link MeanStatistic} class. 
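The tests below lean on the arithmetic contract of MeanStatistic: the mean is the sum divided by the sample count, and addSample() updates both fields. A tiny worked sketch (illustrative only, not part of the patch):

    package org.apache.hadoop.fs.statistics;

    public final class ExampleMeanStatistic {

      private ExampleMeanStatistic() {
      }

      public static void main(String[] args) {
        MeanStatistic mean = new MeanStatistic(1, 10);  // 1 sample, sum 10
        mean.addSample(20);                             // now 2 samples, sum 30
        System.out.println(mean.getSamples());          // 2
        System.out.println(mean.getSum());              // 30
        System.out.println(mean.mean());                // 15.0
      }
    }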
+ */ +public class TestMeanStatistic extends AbstractHadoopTestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestMeanStatistic.class); + + private static final int TEN = 10; + + private static final double ZEROD = 0.0d; + + private static final double TEND = 10.0d; + + private final MeanStatistic empty = new MeanStatistic(0, 0); + + private final MeanStatistic tenFromOne = new MeanStatistic(1, TEN); + + private final MeanStatistic tenFromTen = new MeanStatistic(TEN, TEN); + + @Test + public void testEmptiness() throws Throwable { + Assertions.assertThat(empty) + .matches(MeanStatistic::isEmpty, "is empty") + .isEqualTo(new MeanStatistic(0, TEN)) + .isEqualTo(new MeanStatistic()) + .isNotEqualTo(tenFromOne); + Assertions.assertThat(empty.mean()) + .isEqualTo(ZEROD); + Assertions.assertThat(empty.toString()) + .contains("0.0"); + } + + @Test + public void testTenFromOne() throws Throwable { + Assertions.assertThat(tenFromOne) + .matches(p -> !p.isEmpty(), "is not empty") + .isEqualTo(tenFromOne) + .isNotEqualTo(tenFromTen); + Assertions.assertThat(tenFromOne.mean()) + .isEqualTo(TEND); + } + + @Test + public void testNegativeSamplesAreEmpty() throws Throwable { + MeanStatistic stat = new MeanStatistic(-10, 1); + Assertions.assertThat(stat) + .describedAs("stat with negative samples") + .matches(MeanStatistic::isEmpty, "is empty") + .isEqualTo(empty) + .extracting(MeanStatistic::mean) + .isEqualTo(ZEROD); + Assertions.assertThat(stat.toString()) + .contains("0.0"); + + } + + @Test + public void testCopyNonEmpty() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + Assertions.assertThat(stat) + .describedAs("copy of " + tenFromOne) + .isEqualTo(tenFromOne) + .isNotSameAs(tenFromOne); + } + + @Test + public void testCopyEmpty() throws Throwable { + MeanStatistic stat = empty.copy(); + Assertions.assertThat(stat) + .describedAs("copy of " + empty) + .isEqualTo(empty) + .isNotSameAs(empty); + } + + @Test + public void testDoubleSamples() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + Assertions.assertThat(stat.add(tenFromOne)) + .isEqualTo(new MeanStatistic(2, 20)) + .extracting(MeanStatistic::mean) + .isEqualTo(TEND); + } + + @Test + public void testAddEmptyR() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + Assertions.assertThat(stat.add(empty)) + .isEqualTo(tenFromOne); + } + + @Test + public void testAddEmptyL() throws Throwable { + MeanStatistic stat = empty.copy(); + Assertions.assertThat(stat.add(tenFromOne)) + .isEqualTo(tenFromOne); + } + + @Test + public void testAddEmptyLR() throws Throwable { + MeanStatistic stat = empty.copy(); + Assertions.assertThat(stat.add(empty)) + .isEqualTo(empty); + } + + @Test + public void testAddSampleToEmpty() throws Throwable { + MeanStatistic stat = empty.copy(); + stat.addSample(TEN); + Assertions.assertThat(stat) + .isEqualTo(tenFromOne); + } + + @Test + public void testAddZeroValueSamples() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + for (int i = 0; i < 9; i++) { + stat.addSample(0); + } + Assertions.assertThat(stat) + .isEqualTo(tenFromTen); + } + + @Test + public void testSetSamples() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + stat.setSamples(10); + Assertions.assertThat(stat) + .isEqualTo(tenFromTen); + } + + @Test + public void testSetSums() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + stat.setSum(100); + stat.setSamples(20); + Assertions.assertThat(stat) + .isEqualTo(new MeanStatistic(20, 100)) + 
.extracting(MeanStatistic::mean) + .isEqualTo(5.0d); + } + + @Test + public void testSetNegativeSamplesMakesEmpty() throws Throwable { + MeanStatistic stat = tenFromOne.copy(); + stat.setSamples(-3); + Assertions.assertThat(stat) + .isEqualTo(empty); + } + + @Test + public void testJsonRoundTrip() throws Throwable { + JsonSerialization serializer = serializer(); + + String json = serializer.toJson(tenFromTen); + LOG.info("serialized form\n{}", json); + Assertions.assertThat(json) + .describedAs("JSON form of %s", tenFromTen) + .doesNotContain("empty") + .doesNotContain("mean"); + + MeanStatistic deser = serializer.fromJson(json); + LOG.info("deserialized {}", deser); + Assertions.assertThat(deser) + .isEqualTo(tenFromTen); + } + + /** + * negative sample counts in the json convert the stat to being empty. + */ + @Test + public void testHandleMaliciousStat() throws Throwable { + String json = "{\n" + + " \"sum\" : 10,\n" + + " \"samples\" : -10\n" + + "}"; + JsonSerialization serializer = serializer(); + MeanStatistic deser = serializer.fromJson(json); + LOG.info("deserialized {}", deser); + Assertions.assertThat(deser) + .isEqualTo(empty); + } + + /** + * Get a JSON serializer. + * @return a serializer. + */ + public static JsonSerialization serializer() { + return new JsonSerialization<>(MeanStatistic.class, true, true); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java new file mode 100644 index 0000000000000..bf7a6e32c8e93 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestHCFSMountTableConfigLoader.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests the mount table loading. 
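Before the loader tests below, a short sketch of the kind of mount-table link the loader is expected to leave in the Configuration; the table name, source path and target URI are illustrative assumptions, and the key in the comment is only the approximate expanded form.

    package org.apache.hadoop.fs.viewfs;

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;

    public final class ExampleMountTableConfig {

      private ExampleMountTableConfig() {
      }

      public static Configuration sampleMountTable() {
        Configuration conf = new Configuration(false);
        // roughly equivalent to setting
        // fs.viewfs.mounttable.test.link./data = hdfs://nn1/data
        ConfigUtil.addLink(conf, "test", "/data", URI.create("hdfs://nn1/data"));
        return conf;
      }
    }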
+ */ +public class TestHCFSMountTableConfigLoader { + + private static final String DOT = "."; + + private static final String TARGET_TWO = "/tar2"; + + private static final String TARGET_ONE = "/tar1"; + + private static final String SRC_TWO = "/src2"; + + private static final String SRC_ONE = "/src1"; + + private static final String TABLE_NAME = "test"; + + private MountTableConfigLoader loader = new HCFSMountTableConfigLoader(); + + private static FileSystem fsTarget; + private static Configuration conf; + private static Path targetTestRoot; + private static FileSystemTestHelper fileSystemTestHelper = + new FileSystemTestHelper(); + private static File oldVersionMountTableFile; + private static File newVersionMountTableFile; + private static final String MOUNT_LINK_KEY_SRC_ONE = + new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(DOT) + .append(TABLE_NAME).append(DOT).append(Constants.CONFIG_VIEWFS_LINK) + .append(DOT).append(SRC_ONE).toString(); + private static final String MOUNT_LINK_KEY_SRC_TWO = + new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(DOT) + .append(TABLE_NAME).append(DOT).append(Constants.CONFIG_VIEWFS_LINK) + .append(DOT).append(SRC_TWO).toString(); + + @BeforeClass + public static void init() throws Exception { + fsTarget = new LocalFileSystem(); + fsTarget.initialize(new URI("file:///"), new Configuration()); + targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); + fsTarget.delete(targetTestRoot, true); + fsTarget.mkdirs(targetTestRoot); + } + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + conf.set(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, "file"), + LocalFileSystem.class.getName()); + oldVersionMountTableFile = + new File(new URI(targetTestRoot.toString() + "/table.1.xml")); + oldVersionMountTableFile.createNewFile(); + newVersionMountTableFile = + new File(new URI(targetTestRoot.toString() + "/table.2.xml")); + newVersionMountTableFile.createNewFile(); + } + + @Test + public void testMountTableFileLoadingWhenMultipleFilesExist() + throws Exception { + ViewFsTestSetup.addMountLinksToFile(TABLE_NAME, + new String[] {SRC_ONE, SRC_TWO }, new String[] {TARGET_ONE, + TARGET_TWO }, + new Path(newVersionMountTableFile.toURI()), conf); + loader.load(targetTestRoot.toString(), conf); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE); + } + + @Test + public void testMountTableFileWithInvalidFormat() throws Exception { + Path path = new Path(new URI( + targetTestRoot.toString() + "/testMountTableFileWithInvalidFormat/")); + fsTarget.mkdirs(path); + File invalidMountFileName = + new File(new URI(path.toString() + "/table.InvalidVersion.xml")); + invalidMountFileName.createNewFile(); + // Adding mount links to make sure it will not read it. 
+ ViewFsTestSetup.addMountLinksToFile(TABLE_NAME, + new String[] {SRC_ONE, SRC_TWO }, new String[] {TARGET_ONE, + TARGET_TWO }, + new Path(invalidMountFileName.toURI()), conf); + // Pass mount table directory + loader.load(path.toString(), conf); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO)); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE)); + invalidMountFileName.delete(); + } + + @Test + public void testMountTableFileWithInvalidFormatWithNoDotsInName() + throws Exception { + Path path = new Path(new URI(targetTestRoot.toString() + + "/testMountTableFileWithInvalidFormatWithNoDots/")); + fsTarget.mkdirs(path); + File invalidMountFileName = + new File(new URI(path.toString() + "/tableInvalidVersionxml")); + invalidMountFileName.createNewFile(); + // Pass mount table directory + loader.load(path.toString(), conf); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_TWO)); + Assert.assertEquals(null, conf.get(MOUNT_LINK_KEY_SRC_ONE)); + invalidMountFileName.delete(); + } + + @Test(expected = FileNotFoundException.class) + public void testLoadWithMountFile() throws Exception { + loader.load(new URI(targetTestRoot.toString() + "/Non-Existent-File.xml") + .toString(), conf); + } + + @Test + public void testLoadWithNonExistentMountFile() throws Exception { + ViewFsTestSetup.addMountLinksToFile(TABLE_NAME, + new String[] {SRC_ONE, SRC_TWO }, + new String[] {TARGET_ONE, TARGET_TWO }, + new Path(oldVersionMountTableFile.toURI()), conf); + loader.load(oldVersionMountTableFile.toURI().toString(), conf); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO); + Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE); + } + + @AfterClass + public static void tearDown() throws IOException { + fsTarget.delete(targetTestRoot, true); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPoint.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPoint.java new file mode 100644 index 0000000000000..5513b6005b41e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPoint.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
<p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URI; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test Regex Mount Point. + */ +public class TestRegexMountPoint { + private static final Logger LOGGER = + LoggerFactory.getLogger(TestRegexMountPoint.class.getName()); + + private InodeTree inodeTree; + private Configuration conf; + + class TestRegexMountPointFileSystem { + public URI getUri() { + return uri; + } + + private URI uri; + + TestRegexMountPointFileSystem(URI uri) { + String uriStr = uri == null ? "null" : uri.toString(); + LOGGER.info("Create TestRegexMountPointFileSystem Via URI:" + uriStr); + this.uri = uri; + } + } + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + ConfigUtil.addLink(conf, TestRegexMountPoint.class.getName(), "/mnt", + URI.create("file:///")); + + inodeTree = new InodeTree(conf, + TestRegexMountPoint.class.getName(), null, false) { + @Override + protected TestRegexMountPointFileSystem getTargetFileSystem( + final URI uri) { + return new TestRegexMountPointFileSystem(uri); + } + + @Override + protected TestRegexMountPointFileSystem getTargetFileSystem( + final INodeDir dir) { + return new TestRegexMountPointFileSystem(null); + } + + @Override + protected TestRegexMountPointFileSystem getTargetFileSystem( + final String settings, final URI[] mergeFsURIList) { + return new TestRegexMountPointFileSystem(null); + } + }; + } + + @After + public void tearDown() throws Exception { + inodeTree = null; + } + + @Test + public void testGetVarListInString() throws IOException { + String srcRegex = "/(\\w+)"; + String target = "/$0/${1}/$1/${2}/${2}"; + RegexMountPoint regexMountPoint = + new RegexMountPoint(inodeTree, srcRegex, target, null); + regexMountPoint.initialize(); + Map> varMap = regexMountPoint.getVarInDestPathMap(); + Assert.assertEquals(varMap.size(), 3); + Assert.assertEquals(varMap.get("0").size(), 1); + Assert.assertTrue(varMap.get("0").contains("$0")); + Assert.assertEquals(varMap.get("1").size(), 2); + Assert.assertTrue(varMap.get("1").contains("${1}")); + Assert.assertTrue(varMap.get("1").contains("$1")); + Assert.assertEquals(varMap.get("2").size(), 1); + Assert.assertTrue(varMap.get("2").contains("${2}")); + } + + @Test + public void testResolve() throws IOException { + String regexStr = "^/user/(?\\w+)"; + String dstPathStr = "/namenode1/testResolve/$username"; + String settingsStr = null; + RegexMountPoint regexMountPoint = + new RegexMountPoint(inodeTree, regexStr, dstPathStr, settingsStr); + regexMountPoint.initialize(); + InodeTree.ResolveResult resolveResult = + regexMountPoint.resolve("/user/hadoop/file1", true); + Assert.assertEquals(resolveResult.kind, InodeTree.ResultKind.EXTERNAL_DIR); + Assert.assertTrue( + resolveResult.targetFileSystem + instanceof TestRegexMountPointFileSystem); + Assert.assertEquals("/user/hadoop", resolveResult.resolvedPath); + Assert.assertTrue( + resolveResult.targetFileSystem + instanceof 
TestRegexMountPointFileSystem); + Assert.assertEquals("/namenode1/testResolve/hadoop", + ((TestRegexMountPointFileSystem) resolveResult.targetFileSystem) + .getUri().toString()); + Assert.assertEquals("/file1", resolveResult.remainingPath.toString()); + } + + @Test + public void testResolveWithInterceptor() throws IOException { + String regexStr = "^/user/(?\\w+)"; + String dstPathStr = "/namenode1/testResolve/$username"; + // Replace "_" with "-" + RegexMountPointResolvedDstPathReplaceInterceptor interceptor = + new RegexMountPointResolvedDstPathReplaceInterceptor("_", "-"); + // replaceresolvedpath:_:- + String settingsStr = interceptor.serializeToString(); + RegexMountPoint regexMountPoint = + new RegexMountPoint(inodeTree, regexStr, dstPathStr, settingsStr); + regexMountPoint.initialize(); + InodeTree.ResolveResult resolveResult = + regexMountPoint.resolve("/user/hadoop_user1/file_index", true); + Assert.assertEquals(resolveResult.kind, InodeTree.ResultKind.EXTERNAL_DIR); + Assert.assertTrue( + resolveResult.targetFileSystem + instanceof TestRegexMountPointFileSystem); + Assert.assertEquals("/user/hadoop_user1", resolveResult.resolvedPath); + Assert.assertTrue( + resolveResult.targetFileSystem + instanceof TestRegexMountPointFileSystem); + Assert.assertEquals("/namenode1/testResolve/hadoop-user1", + ((TestRegexMountPointFileSystem) resolveResult.targetFileSystem) + .getUri().toString()); + Assert.assertEquals("/file_index", + resolveResult.remainingPath.toString()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointInterceptorFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointInterceptorFactory.java new file mode 100644 index 0000000000000..c567944ffe307 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointInterceptorFactory.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Test Regex Mount Point Interceptor Factory. + */ +public class TestRegexMountPointInterceptorFactory { + + @Test + public void testCreateNormalCase() { + String replaceInterceptorStr = + RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH.getConfigName() + + Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP) + + "src" + Character + .toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP) + "replace"; + RegexMountPointInterceptor interceptor = + RegexMountPointInterceptorFactory.create(replaceInterceptorStr); + Assert.assertTrue( + interceptor + instanceof RegexMountPointResolvedDstPathReplaceInterceptor); + } + + @Test + public void testCreateBadCase() { + String replaceInterceptorStr = + RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH.getConfigName() + + "___" + Character + .toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP) + "src" + + Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP) + + "replace"; + RegexMountPointInterceptor interceptor = + RegexMountPointInterceptorFactory.create(replaceInterceptorStr); + Assert.assertTrue(interceptor == null); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointResolvedDstPathReplaceInterceptor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointResolvedDstPathReplaceInterceptor.java new file mode 100644 index 0000000000000..9fdf0f6ac9c5c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestRegexMountPointResolvedDstPathReplaceInterceptor.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; + +import org.junit.Assert; +import org.junit.Test; + +import static org.apache.hadoop.fs.viewfs.RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH; + +/** + * Test RegexMountPointResolvedDstPathReplaceInterceptor. + */ +public class TestRegexMountPointResolvedDstPathReplaceInterceptor { + + public String createSerializedString(String regex, String replaceString) { + return REPLACE_RESOLVED_DST_PATH.getConfigName() + + RegexMountPoint.INTERCEPTOR_INTERNAL_SEP + regex + + RegexMountPoint.INTERCEPTOR_INTERNAL_SEP + replaceString; + } + + @Test + public void testDeserializeFromStringNormalCase() throws IOException { + String srcRegex = "-"; + String replaceString = "_"; + String serializedString = createSerializedString(srcRegex, replaceString); + RegexMountPointResolvedDstPathReplaceInterceptor interceptor = + RegexMountPointResolvedDstPathReplaceInterceptor + .deserializeFromString(serializedString); + Assert.assertEquals(srcRegex, interceptor.getSrcRegexString()); + Assert.assertEquals(replaceString, interceptor.getReplaceString()); + Assert.assertNull(interceptor.getSrcRegexPattern()); + interceptor.initialize(); + Assert.assertEquals(srcRegex, + interceptor.getSrcRegexPattern().toString()); + } + + @Test + public void testDeserializeFromStringBadCase() throws IOException { + String srcRegex = "-"; + String replaceString = "_"; + String serializedString = createSerializedString(srcRegex, replaceString); + serializedString = serializedString + ":ddd"; + RegexMountPointResolvedDstPathReplaceInterceptor interceptor = + RegexMountPointResolvedDstPathReplaceInterceptor + .deserializeFromString(serializedString); + Assert.assertNull(interceptor); + } + + @Test + public void testSerialization() { + String srcRegex = "word1"; + String replaceString = "word2"; + String serializedString = createSerializedString(srcRegex, replaceString); + RegexMountPointResolvedDstPathReplaceInterceptor interceptor = + new RegexMountPointResolvedDstPathReplaceInterceptor(srcRegex, + replaceString); + Assert.assertEquals(interceptor.serializeToString(), serializedString); + } + + @Test + public void testInterceptSource() { + String srcRegex = "word1"; + String replaceString = "word2"; + RegexMountPointResolvedDstPathReplaceInterceptor interceptor = + new RegexMountPointResolvedDstPathReplaceInterceptor(srcRegex, + replaceString); + String sourcePath = "/a/b/l3/dd"; + Assert.assertEquals(sourcePath, interceptor.interceptSource(sourcePath)); + } + + @Test + public void testInterceptResolve() throws IOException { + String pathAfterResolution = "/user-hadoop"; + + String srcRegex = "hadoop"; + String replaceString = "hdfs"; + RegexMountPointResolvedDstPathReplaceInterceptor interceptor = + new RegexMountPointResolvedDstPathReplaceInterceptor(srcRegex, + replaceString); + interceptor.initialize(); + Assert.assertEquals("/user-hdfs", + interceptor.interceptResolvedDestPathStr(pathAfterResolution)); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java new file mode 100644 index 0000000000000..1527e3c1f30d8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeCentralMountTableConfig.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.junit.Before; + +/** + * Test the TestViewFSOverloadSchemeCentralMountTableConfig with mount-table + * configuration files in configured fs location. + */ +public class TestViewFSOverloadSchemeCentralMountTableConfig + extends TestViewFileSystemOverloadSchemeLocalFileSystem { + private Path oldMountTablePath; + private Path latestMountTablepath; + + @Before + public void setUp() throws Exception { + super.setUp(); + // Mount table name format: mount-table..xml + String mountTableFileName1 = "mount-table.1.xml"; + String mountTableFileName2 = "mount-table.2.xml"; + oldMountTablePath = + new Path(getTestRoot() + File.separator + mountTableFileName1); + latestMountTablepath = + new Path(getTestRoot() + File.separator + mountTableFileName2); + getConf().set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH, + getTestRoot().toString()); + File f = new File(oldMountTablePath.toUri()); + f.createNewFile(); // Just creating empty mount-table file. + File f2 = new File(latestMountTablepath.toUri()); + latestMountTablepath = new Path(f2.toURI()); + f2.createNewFile(); + } + + /** + * This method saves the mount links in a local files. + */ + @Override + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration conf) throws IOException, URISyntaxException { + // we don't use conf here, instead we use config paths to store links. + // Mount-table old version file mount-table-.xml + try (BufferedWriter out = new BufferedWriter( + new FileWriter(new File(oldMountTablePath.toUri())))) { + out.write("\n"); + // Invalid tag. This file should not be read. 
+ out.write(""); + out.write("\n"); + out.flush(); + } + ViewFsTestSetup.addMountLinksToFile(mountTable, sources, targets, + latestMountTablepath, conf); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java new file mode 100644 index 0000000000000..ac7a1a6899425 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * + * Test the TestViewFileSystemOverloadSchemeLF using a file with authority: + * file://mountTableName/ i.e, the authority is used to load a mount table. + */ +public class TestViewFileSystemOverloadSchemeLocalFileSystem { + private static final String FILE = "file"; + private static final Log LOG = + LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class); + private FileSystem fsTarget; + private Configuration conf; + private Path targetTestRoot; + private FileSystemTestHelper fileSystemTestHelper = + new FileSystemTestHelper(); + + @Before + public void setUp() throws Exception { + conf = new Configuration(); + conf.set(String.format("fs.%s.impl", FILE), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, FILE), + LocalFileSystem.class.getName()); + fsTarget = new LocalFileSystem(); + fsTarget.initialize(new URI("file:///"), conf); + // create the test root on local_fs + targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); + fsTarget.delete(targetTestRoot, true); + fsTarget.mkdirs(targetTestRoot); + } + + /** + * Adds the given mount links to config. sources contains mount link src and + * the respective index location in targets contains the target uri. 
+ */ + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration config) throws IOException, URISyntaxException { + ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config); + } + + /** + * Tests write file and read file with ViewFileSystemOverloadScheme. + */ + @Test + public void testLocalTargetLinkWriteSimple() + throws IOException, URISyntaxException { + LOG.info("Starting testLocalTargetLinkWriteSimple"); + final String testString = "Hello Local!..."; + final Path lfsRoot = new Path("/lfsRoot"); + addMountLinks(null, new String[] {lfsRoot.toString() }, + new String[] {targetTestRoot + "/local" }, conf); + try (FileSystem lViewFs = FileSystem.get(URI.create("file:///"), conf)) { + final Path testPath = new Path(lfsRoot, "test.txt"); + try (FSDataOutputStream fsDos = lViewFs.create(testPath)) { + fsDos.writeUTF(testString); + } + + try (FSDataInputStream lViewIs = lViewFs.open(testPath)) { + Assert.assertEquals(testString, lViewIs.readUTF()); + } + } + } + + /** + * Tests create file and delete file with ViewFileSystemOverloadScheme. + */ + @Test + public void testLocalFsCreateAndDelete() throws Exception { + LOG.info("Starting testLocalFsCreateAndDelete"); + addMountLinks("mt", new String[] {"/lfsroot" }, + new String[] {targetTestRoot + "/wd2" }, conf); + final URI mountURI = URI.create("file://mt/"); + try (FileSystem lViewFS = FileSystem.get(mountURI, conf)) { + Path testPath = new Path(mountURI.toString() + "/lfsroot/test"); + lViewFS.createNewFile(testPath); + Assert.assertTrue(lViewFS.exists(testPath)); + lViewFS.delete(testPath, true); + Assert.assertFalse(lViewFS.exists(testPath)); + } + } + + /** + * Tests root level file with linkMergeSlash with + * ViewFileSystemOverloadScheme. + */ + @Test + public void testLocalFsLinkSlashMerge() throws Exception { + LOG.info("Starting testLocalFsLinkSlashMerge"); + addMountLinks("mt", + new String[] {Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH }, + new String[] {targetTestRoot + "/wd2" }, conf); + final URI mountURI = URI.create("file://mt/"); + try (FileSystem lViewFS = FileSystem.get(mountURI, conf)) { + Path fileOnRoot = new Path(mountURI.toString() + "/NewFile"); + lViewFS.createNewFile(fileOnRoot); + Assert.assertTrue(lViewFS.exists(fileOnRoot)); + } + } + + /** + * Tests with linkMergeSlash and other mounts in + * ViewFileSystemOverloadScheme. + */ + @Test(expected = IOException.class) + public void testLocalFsLinkSlashMergeWithOtherMountLinks() throws Exception { + LOG.info("Starting testLocalFsLinkSlashMergeWithOtherMountLinks"); + addMountLinks("mt", + new String[] {"/lfsroot", Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH }, + new String[] {targetTestRoot + "/wd2", targetTestRoot + "/wd2" }, conf); + final URI mountURI = URI.create("file://mt/"); + FileSystem.get(mountURI, conf); + Assert.fail("A merge slash cannot be configured with other mount links."); + } + + @After + public void tearDown() throws Exception { + if (null != fsTarget) { + fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true); + fsTarget.close(); + } + } + + /** + * Returns the test root dir. + */ + public Path getTestRoot() { + return this.targetTestRoot; + } + + /** + * Returns the conf. 
+ */ + public Configuration getConf() { + return this.conf; + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java index 136837fc801c4..56f5b2d997dc2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsConfig.java @@ -39,7 +39,7 @@ public void testInvalidConfig() throws IOException, URISyntaxException { class Foo { } - new InodeTree(conf, null) { + new InodeTree(conf, null, null, false) { @Override protected Foo getTargetFileSystem(final URI uri) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java new file mode 100644 index 0000000000000..7afc78981f6e3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsOverloadSchemeListStatus.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + * ViewFsOverloadScheme ListStatus. + */ +public class TestViewFsOverloadSchemeListStatus { + + private static final File TEST_DIR = + GenericTestUtils.getTestDir(TestViewfsFileStatus.class.getSimpleName()); + private Configuration conf; + private static final String FILE_NAME = "file"; + + @Before + public void setUp() { + conf = new Configuration(); + conf.set(String.format("fs.%s.impl", FILE_NAME), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + FILE_NAME), LocalFileSystem.class.getName()); + FileUtil.fullyDelete(TEST_DIR); + assertTrue(TEST_DIR.mkdirs()); + } + + @After + public void tearDown() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + + /** + * Tests the ACL and isDirectory returned from listStatus for directories and + * files. 
+ */ + @Test + public void testListStatusACL() throws IOException, URISyntaxException { + String testfilename = "testFileACL"; + String childDirectoryName = "testDirectoryACL"; + TEST_DIR.mkdirs(); + File infile = new File(TEST_DIR, testfilename); + final byte[] content = "dingos".getBytes(); + + try (FileOutputStream fos = new FileOutputStream(infile)) { + fos.write(content); + } + assertEquals(content.length, infile.length()); + File childDir = new File(TEST_DIR, childDirectoryName); + childDir.mkdirs(); + + ConfigUtil.addLink(conf, "/file", infile.toURI()); + ConfigUtil.addLink(conf, "/dir", childDir.toURI()); + + String fileUriStr = "file:///"; + try (FileSystem vfs = FileSystem.get(new URI(fileUriStr), conf)) { + assertEquals(ViewFileSystemOverloadScheme.class, vfs.getClass()); + FileStatus[] statuses = vfs.listStatus(new Path("/")); + + FileSystem localFs = ((ViewFileSystemOverloadScheme) vfs) + .getRawFileSystem(new Path(fileUriStr), conf); + FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); + FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals(FILE_NAME)) { + assertEquals(fileStat.getPermission(), status.getPermission()); + } else { + assertEquals(dirStat.getPermission(), status.getPermission()); + } + } + + localFs.setPermission(new Path(infile.getPath()), + FsPermission.valueOf("-rwxr--r--")); + localFs.setPermission(new Path(childDir.getPath()), + FsPermission.valueOf("-r--rwxr--")); + + statuses = vfs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals(FILE_NAME)) { + assertEquals(FsPermission.valueOf("-rwxr--r--"), + status.getPermission()); + assertFalse(status.isDirectory()); + } else { + assertEquals(FsPermission.valueOf("-r--rwxr--"), + status.getPermission()); + assertTrue(status.isDirectory()); + } + } + } + } + + /** + * Tests that ViewFSOverloadScheme should consider initialized fs as fallback + * if there are no mount links configured. It should add fallback with the + * chrootedFS at it's uri's root. + */ + @Test(timeout = 30000) + public void testViewFSOverloadSchemeWithoutAnyMountLinks() throws Exception { + Path initUri = new Path(TEST_DIR.toURI().toString(), "init"); + try (FileSystem fs = FileSystem.get(initUri.toUri(), conf)) { + ViewFileSystemOverloadScheme vfs = (ViewFileSystemOverloadScheme) fs; + assertEquals(0, vfs.getMountPoints().length); + Path testOnFallbackPath = new Path(TEST_DIR.toURI().toString(), "test"); + assertTrue(vfs.mkdirs(testOnFallbackPath)); + FileStatus[] status = vfs.listStatus(testOnFallbackPath.getParent()); + assertEquals(Path.getPathWithoutSchemeAndAuthority(testOnFallbackPath), + Path.getPathWithoutSchemeAndAuthority(status[0].getPath())); + //Check directly on localFS. The fallBackFs(localFS) should be chrooted + //at it's root. So, after + FileSystem lfs = vfs.getRawFileSystem(testOnFallbackPath, conf); + FileStatus[] statusOnLocalFS = + lfs.listStatus(testOnFallbackPath.getParent()); + assertEquals(testOnFallbackPath.getName(), + statusOnLocalFS[0].getPath().getName()); + //initUri should not have exist in lfs, as it would have chrooted on it's + // root only. 
+ assertFalse(lfs.exists(initUri)); + } + } + + @AfterClass + public static void cleanup() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java index 2e498f2c0a023..fd5de72ed71ad 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAuthorityLocalFs.java @@ -48,10 +48,9 @@ public void setUp() throws Exception { fcTarget = FileContext.getLocalFSFileContext(); super.setUp(); // this sets up conf (and fcView which we replace) - // Now create a viewfs using a mount table called "default" - // hence viewfs://default/ + // Now create a viewfs using a mount table using the {MOUNT_TABLE_NAME} schemeWithAuthority = - new URI(FsConstants.VIEWFS_SCHEME, "default", "/", null, null); + new URI(FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); fcView = FileContext.getFileContext(schemeWithAuthority, conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java index 0c31c8ed6a901..8ac447eb02e9b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewfsFileStatus.java @@ -29,10 +29,13 @@ import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -48,6 +51,17 @@ public class TestViewfsFileStatus { private static final File TEST_DIR = GenericTestUtils.getTestDir( TestViewfsFileStatus.class.getSimpleName()); + @Before + public void setUp() { + FileUtil.fullyDelete(TEST_DIR); + assertTrue(TEST_DIR.mkdirs()); + } + + @After + public void tearDown() throws IOException { + FileUtil.fullyDelete(TEST_DIR); + } + @Test public void testFileStatusSerialziation() throws IOException, URISyntaxException { @@ -56,38 +70,92 @@ public void testFileStatusSerialziation() File infile = new File(TEST_DIR, testfilename); final byte[] content = "dingos".getBytes(); - FileOutputStream fos = null; - try { - fos = new FileOutputStream(infile); + try (FileOutputStream fos = new FileOutputStream(infile)) { fos.write(content); - } finally { - if (fos != null) { - fos.close(); - } } assertEquals((long)content.length, infile.length()); Configuration conf = new Configuration(); ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI()); - FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf); - assertEquals(ViewFileSystem.class, vfs.getClass()); - Path path = new Path("/foo/bar/baz", testfilename); - FileStatus stat = vfs.getFileStatus(path); - assertEquals(content.length, stat.getLen()); - ContractTestUtils.assertNotErasureCoded(vfs, path); - assertTrue(path + " should have erasure coding 
unset in " + - "FileStatus#toString(): " + stat, - stat.toString().contains("isErasureCoded=false")); - - // check serialization/deserialization - DataOutputBuffer dob = new DataOutputBuffer(); - stat.write(dob); - DataInputBuffer dib = new DataInputBuffer(); - dib.reset(dob.getData(), 0, dob.getLength()); - FileStatus deSer = new FileStatus(); - deSer.readFields(dib); - assertEquals(content.length, deSer.getLen()); - assertFalse(deSer.isErasureCoded()); + try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + assertEquals(ViewFileSystem.class, vfs.getClass()); + Path path = new Path("/foo/bar/baz", testfilename); + FileStatus stat = vfs.getFileStatus(path); + assertEquals(content.length, stat.getLen()); + ContractTestUtils.assertNotErasureCoded(vfs, path); + assertTrue(path + " should have erasure coding unset in " + + "FileStatus#toString(): " + stat, + stat.toString().contains("isErasureCoded=false")); + + // check serialization/deserialization + DataOutputBuffer dob = new DataOutputBuffer(); + stat.write(dob); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(dob.getData(), 0, dob.getLength()); + FileStatus deSer = new FileStatus(); + deSer.readFields(dib); + assertEquals(content.length, deSer.getLen()); + assertFalse(deSer.isErasureCoded()); + } + } + + /** + * Tests the ACL returned from getFileStatus for directories and files. + * @throws IOException + */ + @Test + public void testListStatusACL() throws IOException { + String testfilename = "testFileACL"; + String childDirectoryName = "testDirectoryACL"; + TEST_DIR.mkdirs(); + File infile = new File(TEST_DIR, testfilename); + final byte[] content = "dingos".getBytes(); + + try (FileOutputStream fos = new FileOutputStream(infile)) { + fos.write(content); + } + assertEquals(content.length, infile.length()); + File childDir = new File(TEST_DIR, childDirectoryName); + childDir.mkdirs(); + + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/file", infile.toURI()); + ConfigUtil.addLink(conf, "/dir", childDir.toURI()); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + try (FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + assertEquals(ViewFileSystem.class, vfs.getClass()); + FileStatus[] statuses = vfs.listStatus(new Path("/")); + + FileSystem localFs = FileSystem.getLocal(conf); + FileStatus fileStat = localFs.getFileStatus(new Path(infile.getPath())); + FileStatus dirStat = localFs.getFileStatus(new Path(childDir.getPath())); + + for (FileStatus status : statuses) { + if (status.getPath().getName().equals("file")) { + assertEquals(fileStat.getPermission(), status.getPermission()); + } else { + assertEquals(dirStat.getPermission(), status.getPermission()); + } + } + + localFs.setPermission(new Path(infile.getPath()), + FsPermission.valueOf("-rwxr--r--")); + localFs.setPermission(new Path(childDir.getPath()), + FsPermission.valueOf("-r--rwxr--")); + + statuses = vfs.listStatus(new Path("/")); + for (FileStatus status : statuses) { + if (status.getPath().getName().equals("file")) { + assertEquals(FsPermission.valueOf("-rwxr--r--"), + status.getPermission()); + assertFalse(status.isDirectory()); + } else { + assertEquals(FsPermission.valueOf("-r--rwxr--"), + status.getPermission()); + assertTrue(status.isDirectory()); + } + } + } } // Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem @@ -97,8 +165,8 @@ public void testGetFileChecksum() throws IOException { final Path path = new Path("/tmp/someFile"); FileSystem mockFS = 
Mockito.mock(FileSystem.class); InodeTree.ResolveResult res = - new InodeTree.ResolveResult(null, mockFS , null, - new Path("someFile")); + new InodeTree.ResolveResult(null, mockFS, null, + new Path("someFile"), true); @SuppressWarnings("unchecked") InodeTree fsState = Mockito.mock(InodeTree.class); Mockito.when(fsState.resolve(path.toString(), true)).thenReturn(res); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index 4902d733e954b..05d7974395013 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.fs.viewfs; +import java.io.File; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.net.URI; import java.security.PrivilegedExceptionAction; @@ -32,6 +34,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -57,6 +61,8 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assume; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.fs.FileSystemTestHelper.*; @@ -109,6 +115,9 @@ protected FileSystemTestHelper createFileSystemHelper() { return new FileSystemTestHelper(); } + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + @Before public void setUp() throws Exception { initializeTargetTestRoot(); @@ -1279,7 +1288,8 @@ public void testLinkTarget() throws Exception { @Test public void testViewFileSystemInnerCache() throws Exception { - ViewFileSystem.InnerCache cache = new ViewFileSystem.InnerCache(); + ViewFileSystem.InnerCache cache = + new ViewFileSystem.InnerCache(new FsGetter()); FileSystem fs = cache.get(fsTarget.getUri(), conf); // InnerCache caches filesystem. 
@@ -1368,4 +1378,56 @@ public void testDeleteOnExit() throws Exception { viewFs.close(); assertFalse(fsTarget.exists(realTestPath)); } + + @Test + public void testGetContentSummary() throws IOException { + ContentSummary summaryBefore = + fsView.getContentSummary(new Path("/internalDir")); + String expected = "GET CONTENT SUMMARY"; + Path filePath = + new Path("/internalDir/internalDir2/linkToDir3", "foo"); + + try (FSDataOutputStream outputStream = fsView.create(filePath)) { + outputStream.write(expected.getBytes()); + } + + Path newDirPath = new Path("/internalDir/linkToDir2", "bar"); + fsView.mkdirs(newDirPath); + + ContentSummary summaryAfter = + fsView.getContentSummary(new Path("/internalDir")); + assertEquals("The file count didn't match", + summaryBefore.getFileCount() + 1, + summaryAfter.getFileCount()); + assertEquals("The size didn't match", + summaryBefore.getLength() + expected.length(), + summaryAfter.getLength()); + assertEquals("The directory count didn't match", + summaryBefore.getDirectoryCount() + 1, + summaryAfter.getDirectoryCount()); + } + + @Test + public void testGetContentSummaryWithFileInLocalFS() throws Exception { + ContentSummary summaryBefore = + fsView.getContentSummary(new Path("/internalDir")); + String expected = "GET CONTENT SUMMARY"; + File localFile = temporaryFolder.newFile("localFile"); + try (FileOutputStream fos = new FileOutputStream(localFile)) { + fos.write(expected.getBytes()); + } + ConfigUtil.addLink(conf, + "/internalDir/internalDir2/linkToLocalFile", localFile.toURI()); + + try (FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, conf)) { + ContentSummary summaryAfter = + fs.getContentSummary(new Path("/internalDir")); + assertEquals("The file count didn't match", + summaryBefore.getFileCount() + 1, + summaryAfter.getFileCount()); + assertEquals("The directory count didn't match", + summaryBefore.getLength() + expected.length(), + summaryAfter.getLength()); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java index d96cdb172b702..73f6265eee72c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java @@ -56,6 +56,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.FileContextTestHelper.fileType; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; @@ -69,6 +70,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -95,6 +97,8 @@ *
    */ abstract public class ViewFsBaseTest { + protected static final String MOUNT_TABLE_NAME = "mycluster"; + FileContext fcView; // the view file system - the mounts are here FileContext fcTarget; // the target file system - the mount will point here Path targetTestRoot; @@ -128,6 +132,9 @@ public void setUp() throws Exception { // Set up the defaultMT in the config with our mount point links conf = new Configuration(); + conf.set( + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + MOUNT_TABLE_NAME); ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri()); ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri()); @@ -540,8 +547,8 @@ public void testGetFileChecksum() throws AccessControlException, UnresolvedLinkException, IOException, URISyntaxException { AbstractFileSystem mockAFS = mock(AbstractFileSystem.class); InodeTree.ResolveResult res = - new InodeTree.ResolveResult(null, mockAFS , null, - new Path("someFile")); + new InodeTree.ResolveResult(null, mockAFS, null, + new Path("someFile"), true); @SuppressWarnings("unchecked") InodeTree fsState = mock(InodeTree.class); when(fsState.resolve(anyString(), anyBoolean())).thenReturn(res); @@ -1001,4 +1008,23 @@ static AbstractFileSystem getMockFs(URI uri) { return mockFs; } } + + @Test + public void testListStatusWithNoGroups() throws Exception { + final UserGroupInformation userUgi = UserGroupInformation + .createUserForTesting("user@HADOOP.COM", new String[] {}); + userUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + URI viewFsUri = new URI( + FsConstants.VIEWFS_SCHEME, MOUNT_TABLE_NAME, "/", null, null); + FileSystem vfs = FileSystem.get(viewFsUri, conf); + LambdaTestUtils.intercept(IOException.class, + "There is no primary group for UGI", () -> vfs + .listStatus(new Path(viewFsUri.toString() + "internalDir"))); + return null; + } + }); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java index 9b7e17f4a601a..b2d7416aa7675 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java @@ -17,16 +17,21 @@ */ package org.apache.hadoop.fs.viewfs; +import java.io.IOException; import java.net.URI; +import java.net.URISyntaxException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestHelper; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme.ChildFsGetter; import org.apache.hadoop.util.Shell; import org.eclipse.jetty.util.log.Log; +import org.junit.Assert; /** @@ -132,4 +137,84 @@ static void linkUpFirstComponents(Configuration conf, String path, + firstComponent + "->" + linkTarget); } + /** + * Adds the given mount links to the given Hadoop compatible file system path. + * Mount link mappings are in sources, targets at their respective index + * locations. 
+ */ + static void addMountLinksToFile(String mountTable, String[] sources, + String[] targets, Path mountTableConfPath, Configuration conf) + throws IOException, URISyntaxException { + ChildFsGetter cfs = new ViewFileSystemOverloadScheme.ChildFsGetter( + mountTableConfPath.toUri().getScheme()); + try (FileSystem fs = cfs.getNewInstance(mountTableConfPath.toUri(), + conf)) { + try (FSDataOutputStream out = fs.create(mountTableConfPath)) { + String prefix = + new StringBuilder(Constants.CONFIG_VIEWFS_PREFIX).append(".") + .append((mountTable == null + ? ConfigUtil.getDefaultMountTableName(conf) + : mountTable)) + .append(".").toString(); + out.writeBytes(""); + for (int i = 0; i < sources.length; i++) { + String src = sources[i]; + String target = targets[i]; + boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY); + out.writeBytes(""); + if (isNfly) { + String[] srcParts = src.split("[.]"); + Assert.assertEquals("Invalid NFlyLink format", 3, srcParts.length); + String actualSrc = srcParts[srcParts.length - 1]; + String params = srcParts[srcParts.length - 2]; + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_NFLY + "." + + params + "." + actualSrc); + } else if (Constants.CONFIG_VIEWFS_LINK_FALLBACK.equals(src)) { + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_FALLBACK); + } else if (Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH.equals(src)) { + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH); + } else { + out.writeBytes(prefix + Constants.CONFIG_VIEWFS_LINK + "." + src); + } + out.writeBytes(""); + out.writeBytes(""); + out.writeBytes(target); + out.writeBytes(""); + out.flush(); + } + out.writeBytes(("")); + out.flush(); + } + } + } + + /** + * Adds the given mount links to the configuration. Mount link mappings are + * in sources, targets at their respective index locations. + */ + public static void addMountLinksToConf(String mountTable, String[] sources, + String[] targets, Configuration config) throws URISyntaxException { + for (int i = 0; i < sources.length; i++) { + String src = sources[i]; + String target = targets[i]; + String mountTableName = mountTable == null ? 
+ Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE : mountTable; + boolean isNfly = src.startsWith(Constants.CONFIG_VIEWFS_LINK_NFLY); + if (isNfly) { + String[] srcParts = src.split("[.]"); + Assert.assertEquals("Invalid NFlyLink format", 3, srcParts.length); + String actualSrc = srcParts[srcParts.length - 1]; + String params = srcParts[srcParts.length - 2]; + ConfigUtil.addLinkNfly(config, mountTableName, actualSrc, params, + target); + } else if (src.equals(Constants.CONFIG_VIEWFS_LINK_FALLBACK)) { + ConfigUtil.addLinkFallback(config, mountTableName, new URI(target)); + } else if (src.equals(Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH)) { + ConfigUtil.addLinkMergeSlash(config, mountTableName, new URI(target)); + } else { + ConfigUtil.addLink(config, mountTableName, src, new URI(target)); + } + } + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java index be6181157c0dd..666000b2f48ae 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java @@ -51,7 +51,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Copy-paste of ClientBase from ZooKeeper, but without any of the diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java index 6505fbb8224f8..b5739f7935ed7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java @@ -28,14 +28,14 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.mockito.Mockito; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -119,7 +119,7 @@ private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAd try { RPC.setProtocolEngine(conf, - HAServiceProtocolPB.class, ProtobufRpcEngine.class); + HAServiceProtocolPB.class, ProtobufRpcEngine2.class); HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl()); BlockingService haPbService = HAServiceProtocolService diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java index f63d267f291c0..3c9713bf5fa1d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java @@ -37,8 +37,8 @@ import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.server.ZooKeeperServer; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java index bbef9ef65b326..badd5afc5e91b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java @@ -37,7 +37,7 @@ import org.mockito.AdditionalMatchers; import org.mockito.Mockito; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.event.Level; /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java index 791aaad59e990..3f027fa1c598a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java @@ -177,7 +177,7 @@ public void testFailoverFromFaultyServiceSucceeds() throws Exception { } // svc1 still thinks it's active, that's OK, it was fenced - assertEquals(1, AlwaysSucceedFencer.fenceCalled); + assertEquals(2, AlwaysSucceedFencer.fenceCalled); assertSame(svc1, AlwaysSucceedFencer.fencedSvc); assertEquals(HAServiceState.ACTIVE, svc1.state); assertEquals(HAServiceState.ACTIVE, svc2.state); @@ -201,7 +201,7 @@ public void testFailoverFromFaultyServiceFencingFailure() throws Exception { } assertEquals(1, AlwaysFailFencer.fenceCalled); - assertSame(svc1, AlwaysFailFencer.fencedSvc); + assertSame(svc2, AlwaysFailFencer.fencedSvc); assertEquals(HAServiceState.ACTIVE, svc1.state); assertEquals(HAServiceState.STANDBY, svc2.state); } @@ -223,7 +223,7 @@ public void testFencingFailureDuringFailover() throws Exception { // If fencing was requested and it failed we don't try to make // svc2 active anyway, and we don't failback to svc1. assertEquals(1, AlwaysFailFencer.fenceCalled); - assertSame(svc1, AlwaysFailFencer.fencedSvc); + assertSame(svc2, AlwaysFailFencer.fencedSvc); assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(HAServiceState.STANDBY, svc2.state); } @@ -344,7 +344,7 @@ public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception { // and we didn't force it, so we failed back to svc1 and fenced svc2. // Note svc2 still thinks it's active, that's OK, we fenced it. assertEquals(HAServiceState.ACTIVE, svc1.state); - assertEquals(1, AlwaysSucceedFencer.fenceCalled); + assertEquals(2, AlwaysSucceedFencer.fenceCalled); assertSame(svc2, AlwaysSucceedFencer.fencedSvc); } @@ -373,7 +373,7 @@ public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception { // so we did not failback to svc1, ie it's still standby. 
assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(1, AlwaysFailFencer.fenceCalled); - assertSame(svc2, AlwaysFailFencer.fencedSvc); + assertSame(svc1, AlwaysFailFencer.fencedSvc); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java index 63b9c63646d8b..13f7eccd55aea 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java @@ -30,8 +30,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Charsets; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java index b929bcb2490b0..972113eefa91f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java @@ -29,7 +29,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestNodeFencer { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java index 3a2cf052a60a8..dcff9e30cdba2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.util.Shell; @@ -163,6 +163,37 @@ public void testTargetAsEnvironment() { } } + /** + * Test if fencing target has peer set, the failover can trigger different + * commands on source and destination respectively. 
+ */ + @Test + public void testEnvironmentWithPeer() { + HAServiceTarget target = new DummyHAService(HAServiceState.ACTIVE, + new InetSocketAddress("dummytarget", 1111)); + HAServiceTarget source = new DummyHAService(HAServiceState.STANDBY, + new InetSocketAddress("dummysource", 2222)); + target.setTransitionTargetHAStatus(HAServiceState.ACTIVE); + source.setTransitionTargetHAStatus(HAServiceState.STANDBY); + String cmd = "echo $target_host $target_port," + + "echo $source_host $source_port"; + if (!Shell.WINDOWS) { + fencer.tryFence(target, cmd); + Mockito.verify(ShellCommandFencer.LOG).info( + Mockito.contains("echo $ta...rget_port: dummytarget 1111")); + fencer.tryFence(source, cmd); + Mockito.verify(ShellCommandFencer.LOG).info( + Mockito.contains("echo $so...urce_port: dummysource 2222")); + } else { + fencer.tryFence(target, cmd); + Mockito.verify(ShellCommandFencer.LOG).info( + Mockito.contains("echo %ta...get_port%: dummytarget 1111")); + fencer.tryFence(source, cmd); + Mockito.verify(ShellCommandFencer.LOG).info( + Mockito.contains("echo %so...urce_port%: dummysource 2222")); + } + } + /** * Test that we properly close off our input to the subprocess diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java index cc1174b2d2c72..e0169f5fc70d3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java @@ -21,7 +21,7 @@ import java.security.NoSuchAlgorithmException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java index e88eba342874c..039fae0195730 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServerConfigs.java @@ -18,7 +18,7 @@ package org.apache.hadoop.http; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.File; import java.io.IOException; import java.net.URI; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 59856a4de11f9..0fb20acf4c8e9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -23,8 +23,8 @@ import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; import java.util.Random; -import com.google.common.base.Charsets; -import com.google.common.primitives.Bytes; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; import org.junit.Test; import static org.assertj.core.api.Assertions.assertThat; diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java index 35f84b950e427..c016ff0378957 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java @@ -39,10 +39,10 @@ import org.apache.log4j.Logger; import org.junit.Assert; -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import static org.junit.Assert.*; public class CompressDecompressTester { @@ -79,27 +79,6 @@ public ImmutableList> filterOnAssumeWhat( }; } - private static boolean isNativeSnappyLoadable() { - boolean snappyAvailable = false; - boolean loaded = false; - try { - System.loadLibrary("snappy"); - logger.warn("Snappy native library is available"); - snappyAvailable = true; - boolean hadoopNativeAvailable = NativeCodeLoader.isNativeCodeLoaded(); - loaded = snappyAvailable && hadoopNativeAvailable; - if (loaded) { - logger.info("Snappy native library loaded"); - } else { - logger.warn("Snappy native library not loaded"); - } - } catch (Throwable t) { - logger.warn("Failed to load snappy: ", t); - return false; - } - return loaded; - } - public static CompressDecompressTester of( byte[] rawData) { return new CompressDecompressTester(rawData); @@ -126,7 +105,7 @@ private void addPair(T compressor, E decompressor, String name) { builder.add(new TesterPair(name, compressor, decompressor)); } - public void test() throws InstantiationException, IllegalAccessException { + public void test() throws Exception { pairs = builder.build(); pairs = assertionDelegate.filterOnAssumeWhat(pairs); @@ -287,47 +266,45 @@ private boolean checkSetInputArrayIndexOutOfBoundsException( @Override public void assertCompression(String name, Compressor compressor, - Decompressor decompressor, byte[] rawData) { + Decompressor decompressor, byte[] rawData) throws Exception { int cSize = 0; int decompressedSize = 0; - byte[] compressedResult = new byte[rawData.length]; + // Snappy compression can increase data size + int maxCompressedLength = 32 + rawData.length + rawData.length/6; + byte[] compressedResult = new byte[maxCompressedLength]; byte[] decompressedBytes = new byte[rawData.length]; - try { - assertTrue( - joiner.join(name, "compressor.needsInput before error !!!"), - compressor.needsInput()); - assertTrue( + assertTrue( + joiner.join(name, "compressor.needsInput before error !!!"), + compressor.needsInput()); + assertEquals( joiner.join(name, "compressor.getBytesWritten before error !!!"), - compressor.getBytesWritten() == 0); - compressor.setInput(rawData, 0, rawData.length); - compressor.finish(); - while (!compressor.finished()) { - cSize += compressor.compress(compressedResult, 0, - compressedResult.length); - } - compressor.reset(); - - assertTrue( - joiner.join(name, "decompressor.needsInput() before error !!!"), - decompressor.needsInput()); - 
decompressor.setInput(compressedResult, 0, cSize); - assertFalse( - joiner.join(name, "decompressor.needsInput() after error !!!"), - decompressor.needsInput()); - while (!decompressor.finished()) { - decompressedSize = decompressor.decompress(decompressedBytes, 0, - decompressedBytes.length); - } - decompressor.reset(); - assertTrue(joiner.join(name, " byte size not equals error !!!"), - decompressedSize == rawData.length); - assertArrayEquals( - joiner.join(name, " byte arrays not equals error !!!"), rawData, - decompressedBytes); - } catch (Exception ex) { - fail(joiner.join(name, ex.getMessage())); + 0, compressor.getBytesWritten()); + compressor.setInput(rawData, 0, rawData.length); + compressor.finish(); + while (!compressor.finished()) { + cSize += compressor.compress(compressedResult, 0, + compressedResult.length); + } + compressor.reset(); + + assertTrue( + joiner.join(name, "decompressor.needsInput() before error !!!"), + decompressor.needsInput()); + decompressor.setInput(compressedResult, 0, cSize); + assertFalse( + joiner.join(name, "decompressor.needsInput() after error !!!"), + decompressor.needsInput()); + while (!decompressor.finished()) { + decompressedSize = decompressor.decompress(decompressedBytes, 0, + decompressedBytes.length); } + decompressor.reset(); + assertEquals(joiner.join(name, " byte size not equals error !!!"), + rawData.length, decompressedSize); + assertArrayEquals( + joiner.join(name, " byte arrays not equals error !!!"), rawData, + decompressedBytes); } }), @@ -434,7 +411,7 @@ public void assertCompression(String name, Compressor compressor, joiner.join(name, "byte arrays not equals error !!!"), originalRawData, decompressOut.toByteArray()); } catch (Exception ex) { - fail(joiner.join(name, ex.getMessage())); + throw new AssertionError(name + ex, ex); } finally { try { compressedOut.close(); @@ -496,8 +473,7 @@ public String getName() { private static boolean isAvailable(TesterPair pair) { Compressor compressor = pair.compressor; - if (compressor.getClass().isAssignableFrom(Lz4Compressor.class) - && (NativeCodeLoader.isNativeCodeLoaded())) + if (compressor.getClass().isAssignableFrom(Lz4Compressor.class)) return true; else if (compressor.getClass().isAssignableFrom(BuiltInZlibDeflater.class) @@ -506,11 +482,10 @@ else if (compressor.getClass().isAssignableFrom(BuiltInZlibDeflater.class) else if (compressor.getClass().isAssignableFrom(ZlibCompressor.class)) { return ZlibFactory.isNativeZlibLoaded(new Configuration()); - } - else if (compressor.getClass().isAssignableFrom(SnappyCompressor.class) - && isNativeSnappyLoadable()) + } else if (compressor.getClass().isAssignableFrom(SnappyCompressor.class)) { return true; - + } + return false; } @@ -519,6 +494,6 @@ abstract static class TesterCompressionStrategy { protected final Logger logger = Logger.getLogger(getClass()); abstract void assertCompression(String name, Compressor compressor, - Decompressor decompressor, byte[] originalRawData); + Decompressor decompressor, byte[] originalRawData) throws Exception; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java index 94ff7a88493c7..02785a3da030e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java @@ -76,8 +76,6 @@ import 
org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.ReflectionUtils; import org.junit.After; -import org.junit.Assert; -import org.junit.Assume; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -135,30 +133,22 @@ public void testBZip2NativeCodec() throws IOException { @Test public void testSnappyCodec() throws IOException { - if (SnappyCodec.isNativeCodeLoaded()) { - codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec"); - codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec"); - } + codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec"); + codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec"); } @Test public void testLz4Codec() throws IOException { - if (NativeCodeLoader.isNativeCodeLoaded()) { - if (Lz4Codec.isNativeCodeLoaded()) { - conf.setBoolean( - CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, - false); - codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec"); - codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec"); - conf.setBoolean( - CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, - true); - codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec"); - codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec"); - } else { - Assert.fail("Native hadoop library available but lz4 not"); - } - } + conf.setBoolean( + CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, + false); + codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec"); + codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec"); + conf.setBoolean( + CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, + true); + codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec"); + codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec"); } @Test @@ -614,7 +604,6 @@ private static void sequenceFileCodecTest(Configuration conf, int lines, */ @Test public void testSnappyMapFile() throws Exception { - Assume.assumeTrue(SnappyCodec.isNativeCodeLoaded()); codecTestMapFile(SnappyCodec.class, CompressionType.BLOCK, 100); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorDecompressor.java index 1f035974883cf..43cb4df1105b2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorDecompressor.java @@ -27,7 +27,7 @@ import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; /** * Test for pairs: @@ -72,20 +72,23 @@ public void testCompressorDecompressor() { } @Test - public void testCompressorDecompressorWithExeedBufferLimit() { - int BYTE_SIZE = 100 * 1024; - byte[] rawData = generate(BYTE_SIZE); + public void testCompressorDecompressorWithExceedBufferLimit() { + // input data size greater than internal buffer size. 
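In TestCodec just above, testSnappyCodec and testLz4Codec drop their native-library guards (the codecs are now backed by pure-Java bindings) and the lz4 test flips IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY both ways. A round-trip sketch through the codec stream API, assuming only that Lz4Codec is usable without native hadoop code after this patch; the payload is arbitrary:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.compress.CompressionInputStream;
    import org.apache.hadoop.io.compress.CompressionOutputStream;
    import org.apache.hadoop.io.compress.Lz4Codec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class Lz4RoundTripSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Same key the test toggles; true selects the high-compression variant.
        conf.setBoolean(
            CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY, true);
        Lz4Codec codec = ReflectionUtils.newInstance(Lz4Codec.class, conf);

        byte[] data = "hello lz4 hello lz4 hello lz4".getBytes(StandardCharsets.UTF_8);

        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        CompressionOutputStream cout = codec.createOutputStream(compressed);
        cout.write(data);
        cout.close();

        ByteArrayOutputStream decompressed = new ByteArrayOutputStream();
        CompressionInputStream cin = codec.createInputStream(
            new ByteArrayInputStream(compressed.toByteArray()));
        IOUtils.copyBytes(cin, decompressed, 4096, true);

        System.out.println(Arrays.equals(data, decompressed.toByteArray()));
      }
    }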
+ final int byteSize = 100 * 1024; + final int bufferSize = 64 * 1024; + byte[] rawData = generate(byteSize); try { CompressDecompressTester.of(rawData) .withCompressDecompressPair( - new SnappyCompressor(BYTE_SIZE + BYTE_SIZE / 2), - new SnappyDecompressor(BYTE_SIZE + BYTE_SIZE / 2)) - .withCompressDecompressPair(new Lz4Compressor(BYTE_SIZE), - new Lz4Decompressor(BYTE_SIZE)) - .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK, - CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK, - CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS, - CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM)) + new SnappyCompressor(bufferSize), + new SnappyDecompressor(bufferSize)) + .withCompressDecompressPair( + new Lz4Compressor(bufferSize), + new Lz4Decompressor(bufferSize)) + .withTestCases(ImmutableSet.of( + CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK, + CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS, + CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM)) .test(); } catch (Exception ex) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java index 6f3b076097aee..8be5ec3d3f78f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java @@ -27,17 +27,20 @@ import java.io.IOException; import java.util.Random; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.compress.BlockCompressorStream; import org.apache.hadoop.io.compress.BlockDecompressorStream; import org.apache.hadoop.io.compress.CompressionInputStream; import org.apache.hadoop.io.compress.CompressionOutputStream; -import org.apache.hadoop.io.compress.Lz4Codec; import org.apache.hadoop.io.compress.lz4.Lz4Compressor; import org.apache.hadoop.io.compress.lz4.Lz4Decompressor; import org.apache.hadoop.test.MultithreadedTestUtil; -import org.junit.Before; import org.junit.Test; import static org.junit.Assume.*; @@ -45,12 +48,7 @@ public class TestLz4CompressorDecompressor { private static final Random rnd = new Random(12345l); - @Before - public void before() { - assumeTrue(Lz4Codec.isNativeCodeLoaded()); - } - - //test on NullPointerException in {@code compressor.setInput()} + //test on NullPointerException in {@code compressor.setInput()} @Test public void testCompressorSetInputNullPointerException() { try { @@ -330,4 +328,36 @@ public void doWork() throws Exception { ctx.waitFor(60000); } + + @Test + public void testLz4Compatibility() throws Exception { + // The sequence file was created using native Lz4 codec before HADOOP-17292. + // After we use lz4-java for lz4 compression, this test makes sure we can + // decompress the sequence file correctly. 
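The testLz4Compatibility case that begins at the end of this hunk (its body continues just below) reads a SequenceFile checked in under /lz4/sequencefile, written by the old native lz4 codec before HADOOP-17292, and verifies the lz4-java based path decodes the same key0/value0 ... records. A sketch of the write/read round trip that fixture represents, assuming a local FileSystem and a scratch path of my own choosing:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.SequenceFile.CompressionType;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.compress.Lz4Codec;

    public class Lz4SequenceFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt("io.seqfile.compress.blocksize", 1000);  // same key the test sets
        FileSystem fs = FileSystem.getLocal(conf);
        Path path = new Path("/tmp/lz4-sequencefile");       // hypothetical scratch path

        Lz4Codec codec = new Lz4Codec();
        codec.setConf(conf);

        // Write a block-compressed file with the lz4 codec.
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
            Text.class, Text.class, CompressionType.BLOCK, codec);
        for (int i = 0; i < 2000; i++) {
          writer.append(new Text("key" + i), new Text("value" + i));
        }
        writer.close();

        // Read it back; a file produced by the pre-HADOOP-17292 native codec is
        // expected to decode the same way through the new decompressor.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        Text key = new Text();
        Text value = new Text();
        int records = 0;
        while (reader.next(key, value)) {
          records++;
        }
        reader.close();
        System.out.println("records read: " + records);
      }
    }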
+ Path filePath = new Path(TestLz4CompressorDecompressor.class + .getResource("/lz4/sequencefile").toURI()); + + Configuration conf = new Configuration(); + conf.setInt("io.seqfile.compress.blocksize", 1000); + FileSystem fs = FileSystem.get(conf); + + int lines = 2000; + + SequenceFile.Reader reader = new SequenceFile.Reader(fs, filePath, conf); + + Writable key = (Writable)reader.getKeyClass().newInstance(); + Writable value = (Writable)reader.getValueClass().newInstance(); + + int lc = 0; + try { + while (reader.next(key, value)) { + assertEquals("key" + lc, key.toString()); + assertEquals("value" + lc, value.toString()); + lc++; + } + } finally { + reader.close(); + } + assertEquals(lines, lc); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java index cc986c7e0aea4..93c24835f2206 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.io.compress.snappy; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -32,26 +33,28 @@ import java.nio.ByteBuffer; import java.util.Random; +import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.compress.BlockCompressorStream; import org.apache.hadoop.io.compress.BlockDecompressorStream; import org.apache.hadoop.io.compress.CompressionInputStream; import org.apache.hadoop.io.compress.CompressionOutputStream; -import org.apache.hadoop.io.compress.SnappyCodec; import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor; import org.apache.hadoop.test.MultithreadedTestUtil; import org.junit.Assert; import org.junit.Before; import org.junit.Test; - -import static org.junit.Assume.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestSnappyCompressorDecompressor { + public static final Logger LOG = + LoggerFactory.getLogger(TestSnappyCompressorDecompressor.class); + @Before public void before() { - assumeTrue(SnappyCodec.isNativeCodeLoaded()); } @Test @@ -167,40 +170,41 @@ public void testSnappyDecompressorCompressAIOBException() { } @Test - public void testSnappyCompressDecompress() { + public void testSnappyCompressDecompress() throws Exception { int BYTE_SIZE = 1024 * 54; byte[] bytes = BytesGenerator.get(BYTE_SIZE); SnappyCompressor compressor = new SnappyCompressor(); - try { - compressor.setInput(bytes, 0, bytes.length); - assertTrue("SnappyCompressDecompress getBytesRead error !!!", - compressor.getBytesRead() > 0); - assertTrue( - "SnappyCompressDecompress getBytesWritten before compress error !!!", - compressor.getBytesWritten() == 0); - - byte[] compressed = new byte[BYTE_SIZE]; - int cSize = compressor.compress(compressed, 0, compressed.length); - assertTrue( - "SnappyCompressDecompress getBytesWritten after compress error !!!", - compressor.getBytesWritten() > 0); - - SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE); - // set as input for decompressor only compressed 
data indicated with cSize - decompressor.setInput(compressed, 0, cSize); - byte[] decompressed = new byte[BYTE_SIZE]; - decompressor.decompress(decompressed, 0, decompressed.length); - - assertTrue("testSnappyCompressDecompress finished error !!!", - decompressor.finished()); - Assert.assertArrayEquals(bytes, decompressed); - compressor.reset(); - decompressor.reset(); - assertTrue("decompressor getRemaining error !!!", - decompressor.getRemaining() == 0); - } catch (Exception e) { - fail("testSnappyCompressDecompress ex error!!!"); - } + compressor.setInput(bytes, 0, bytes.length); + assertTrue("SnappyCompressDecompress getBytesRead error !!!", + compressor.getBytesRead() > 0); + assertEquals( + "SnappyCompressDecompress getBytesWritten before compress error !!!", + 0, compressor.getBytesWritten()); + + // snappy compression may increase data size. + // This calculation comes from "Snappy::MaxCompressedLength(size_t)" + int maxSize = 32 + BYTE_SIZE + BYTE_SIZE / 6; + byte[] compressed = new byte[maxSize]; + int cSize = compressor.compress(compressed, 0, compressed.length); + LOG.info("input size: {}", BYTE_SIZE); + LOG.info("compressed size: {}", cSize); + assertTrue( + "SnappyCompressDecompress getBytesWritten after compress error !!!", + compressor.getBytesWritten() > 0); + + SnappyDecompressor decompressor = new SnappyDecompressor(); + // set as input for decompressor only compressed data indicated with cSize + decompressor.setInput(compressed, 0, cSize); + byte[] decompressed = new byte[BYTE_SIZE]; + decompressor.decompress(decompressed, 0, decompressed.length); + + assertTrue("testSnappyCompressDecompress finished error !!!", + decompressor.finished()); + Assert.assertArrayEquals(bytes, decompressed); + compressor.reset(); + decompressor.reset(); + assertEquals("decompressor getRemaining error !!!", + 0, decompressor.getRemaining()); } @Test @@ -278,7 +282,38 @@ public void testSnappyBlockCompression() { fail("testSnappyBlockCompression ex error !!!"); } } - + + @Test + // The buffer size is smaller than the input. 
+ public void testSnappyCompressDecompressWithSmallBuffer() throws Exception { + int inputSize = 1024 * 50; + int bufferSize = 512; + ByteArrayOutputStream out = new ByteArrayOutputStream(); + byte[] buffer = new byte[bufferSize]; + byte[] input = BytesGenerator.get(inputSize); + + SnappyCompressor compressor = new SnappyCompressor(); + compressor.setInput(input, 0, inputSize); + compressor.finish(); + while (!compressor.finished()) { + int len = compressor.compress(buffer, 0, buffer.length); + out.write(buffer, 0, len); + } + byte[] compressed = out.toByteArray(); + assertThat(compressed).hasSizeGreaterThan(0); + out.reset(); + + SnappyDecompressor decompressor = new SnappyDecompressor(); + decompressor.setInput(compressed, 0, compressed.length); + while (!decompressor.finished()) { + int len = decompressor.decompress(buffer, 0, buffer.length); + out.write(buffer, 0, len); + } + byte[] decompressed = out.toByteArray(); + + assertThat(decompressed).isEqualTo(input); + } + private void compressDecompressLoop(int rawDataSize) throws IOException { byte[] rawData = BytesGenerator.get(rawDataSize); byte[] compressedResult = new byte[rawDataSize+20]; @@ -318,8 +353,9 @@ private void compressDecompressLoop(int rawDataSize) throws IOException { @Test public void testSnappyDirectBlockCompression() { - int[] size = { 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 }; - assumeTrue(SnappyCodec.isNativeCodeLoaded()); + int[] size = new int[] { + 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 + }; try { for (int i = 0; i < size.length; i++) { compressDecompressLoop(size[i]); @@ -408,4 +444,52 @@ public void doWork() throws Exception { ctx.waitFor(60000); } + + @Test + public void testSnappyCompatibility() throws Exception { + // HADOOP-17125. Using snappy-java in SnappyCodec. These strings are raw + // data and compressed data using previous native Snappy codec. We use + // updated Snappy codec to decode it and check if it matches. 
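testSnappyCompressDecompressWithSmallBuffer, added just above, drains the compressor and the decompressor through a 512-byte staging buffer instead of assuming one call moves the whole payload. The same drain loops as a standalone helper; this mirrors the test's Snappy usage, and other codecs might additionally need needsInput() handling:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.Decompressor;
    import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
    import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;

    public final class SmallBufferCodecSketch {
      /** Compress all of {@code input}, draining through a small staging buffer. */
      static byte[] compress(Compressor c, byte[] input, int staging)
          throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[staging];
        c.setInput(input, 0, input.length);
        c.finish();
        while (!c.finished()) {
          int n = c.compress(buf, 0, buf.length);
          out.write(buf, 0, n);
        }
        return out.toByteArray();
      }

      /** Decompress all of {@code compressed}, draining through a small buffer. */
      static byte[] decompress(Decompressor d, byte[] compressed, int staging)
          throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[staging];
        d.setInput(compressed, 0, compressed.length);
        while (!d.finished()) {
          int n = d.decompress(buf, 0, buf.length);
          out.write(buf, 0, n);
        }
        return out.toByteArray();
      }

      public static void main(String[] args) throws IOException {
        byte[] input = new byte[50 * 1024];
        Arrays.fill(input, (byte) 'a');
        byte[] compressed = compress(new SnappyCompressor(), input, 512);
        byte[] roundTrip = decompress(new SnappyDecompressor(), compressed, 512);
        System.out.println(Arrays.equals(input, roundTrip));
      }
    }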
+ String rawData = "010a06030a040a0c0109020c0a010204020d02000b010701080605" + + "080b090902060a080502060a0d06070908080a0c0105030904090d050908000" + + "40c090c0d0d0804000d00040b0b0d010d060907020a030a0c09000409050801" + + "07040d0c01060a0b09070a04000b01040b09000e0e00020b06050b060e030e0" + + "a07050d06050d"; + String compressed = "8001f07f010a06030a040a0c0109020c0a010204020d02000b0" + + "10701080605080b090902060a080502060a0d06070908080a0c010503090409" + + "0d05090800040c090c0d0d0804000d00040b0b0d010d060907020a030a0c090" + + "0040905080107040d0c01060a0b09070a04000b01040b09000e0e00020b0605" + + "0b060e030e0a07050d06050d"; + + byte[] rawDataBytes = Hex.decodeHex(rawData); + byte[] compressedBytes = Hex.decodeHex(compressed); + + ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedBytes.length); + inBuf.put(compressedBytes, 0, compressedBytes.length); + inBuf.flip(); + + ByteBuffer outBuf = ByteBuffer.allocateDirect(rawDataBytes.length); + ByteBuffer expected = ByteBuffer.wrap(rawDataBytes); + + SnappyDecompressor.SnappyDirectDecompressor decompressor = + new SnappyDecompressor.SnappyDirectDecompressor(); + + outBuf.clear(); + while(!decompressor.finished()) { + decompressor.decompress(inBuf, outBuf); + if (outBuf.remaining() == 0) { + outBuf.flip(); + while (outBuf.remaining() > 0) { + assertEquals(expected.get(), outBuf.get()); + } + outBuf.clear(); + } + } + outBuf.flip(); + while (outBuf.remaining() > 0) { + assertEquals(expected.get(), outBuf.get()); + } + outBuf.clear(); + assertEquals(0, expected.remaining()); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java index 7e2ab241e195d..ac9ea5e8a8468 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java @@ -42,7 +42,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestZlibCompressorDecompressor { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java index 362bde9806327..631991a03cf9c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.erasurecode.rawcoder; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.io.erasurecode.ErasureCoderOptions; import org.apache.hadoop.util.StopWatch; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index bbb4ec21812e3..6742425e766e8 100644 --- 
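testSnappyCompatibility above feeds compressed bytes to SnappyDecompressor.SnappyDirectDecompressor through direct ByteBuffers, draining the output buffer whenever it fills. A self-contained sketch of the same drain pattern; instead of the checked-in hex fixture it compresses its own input first, on the assumption (which the fixture itself relies on) that SnappyCompressor output is valid input for the direct decompressor:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.Arrays;

    import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
    import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;

    public class SnappyDirectDecompressSketch {
      public static void main(String[] args) throws IOException {
        byte[] raw = new byte[8 * 1024];
        Arrays.fill(raw, (byte) 'x');

        // Produce one compressed block (worst case 32 + n + n/6 bytes); a single
        // compress() call suffices because the block is far smaller than that.
        SnappyCompressor compressor = new SnappyCompressor();
        compressor.setInput(raw, 0, raw.length);
        compressor.finish();
        byte[] compressed = new byte[32 + raw.length + raw.length / 6];
        int cSize = compressor.compress(compressed, 0, compressed.length);

        ByteBuffer in = ByteBuffer.allocateDirect(cSize);
        in.put(compressed, 0, cSize);
        in.flip();
        ByteBuffer out = ByteBuffer.allocateDirect(raw.length);

        SnappyDecompressor.SnappyDirectDecompressor decompressor =
            new SnappyDecompressor.SnappyDirectDecompressor();

        ByteArrayOutputStream result = new ByteArrayOutputStream();
        while (!decompressor.finished()) {
          decompressor.decompress(in, out);
          if (out.remaining() == 0) {    // output full: drain it, as the test does
            drain(out, result);
          }
        }
        drain(out, result);              // pick up whatever is left
        System.out.println(Arrays.equals(raw, result.toByteArray()));
      }

      private static void drain(ByteBuffer buf, ByteArrayOutputStream sink) {
        buf.flip();
        byte[] chunk = new byte[buf.remaining()];
        buf.get(chunk);
        sink.write(chunk, 0, chunk.length);
        buf.clear();
      }
    }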
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ipc; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -66,7 +66,7 @@ private static class MyOptions { public int secondsToRun = 15; private int msgSize = 1024; public Class rpcEngine = - ProtobufRpcEngine.class; + ProtobufRpcEngine2.class; private MyOptions(String args[]) { try { @@ -181,7 +181,7 @@ private void processOptions(CommandLine line, Options opts) if (line.hasOption('e')) { String eng = line.getOptionValue('e'); if ("protobuf".equals(eng)) { - rpcEngine = ProtobufRpcEngine.class; + rpcEngine = ProtobufRpcEngine2.class; } else { throw new ParseException("invalid engine: " + eng); } @@ -224,7 +224,7 @@ private Server startServer(MyOptions opts) throws IOException { RPC.Server server; // Get RPC server for server side implementation - if (opts.rpcEngine == ProtobufRpcEngine.class) { + if (opts.rpcEngine == ProtobufRpcEngine2.class) { // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); BlockingService service = TestProtobufRpcProto @@ -378,7 +378,7 @@ private interface RpcServiceWrapper { private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException { InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort()); - if (opts.rpcEngine == ProtobufRpcEngine.class) { + if (opts.rpcEngine == ProtobufRpcEngine2.class) { final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf); return new RpcServiceWrapper() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java index 71723325e2c86..3b8c58c26d66e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java @@ -42,9 +42,8 @@ public class TestDecayRpcScheduler { private Schedulable mockCall(String id) { Schedulable mockCall = mock(Schedulable.class); - UserGroupInformation ugi = mock(UserGroupInformation.class); + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(id); - when(ugi.getUserName()).thenReturn(id); when(mockCall.getUserGroupInformation()).thenReturn(ugi); return mockCall; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index 82540637a2004..32881523fded0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -98,9 +98,9 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; -import com.google.common.primitives.Bytes; -import com.google.common.primitives.Ints; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; +import 
org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java index 10e23baefef9b..c1b0858697682 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java @@ -45,7 +45,7 @@ public void testPBService() throws Exception { // Set RPC engine to protobuf RPC engine Configuration conf2 = new Configuration(); RPC.setProtocolEngine(conf2, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); TestRpcService client = RPC.getProxy(TestRpcService.class, 0, addr, conf2); TestProtoBufRpc.testProtoBufRpc(client); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java index dfb9e934f6055..d813c6b784f5d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java @@ -25,8 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto; -import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto; import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto; import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto; @@ -138,7 +136,7 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { conf = new Configuration(); conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, NewRpcService.class, ProtobufRpcEngine2.class); // Create server side implementation NewServerImpl serverImpl = new NewServerImpl(); @@ -151,7 +149,7 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { server.start(); - RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, OldRpcService.class, ProtobufRpcEngine2.class); OldRpcService proxy = RPC.getProxy(OldRpcService.class, 0, addr, conf); // Verify that exception is thrown if protocolVersion is mismatch between @@ -168,7 +166,8 @@ public void testProtocolVersionMismatch() throws IOException, ServiceException { } // Verify that missing of optional field is still compatible in RPC call. 
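Everything from RPCCallBenchmark down through the SASL and security tests makes the same one-line migration: protocols are registered against ProtobufRpcEngine2 rather than the legacy ProtobufRpcEngine. A minimal sketch of that registration with a hypothetical protocol interface of my own, assuming the engine choice is recorded under a per-protocol rpc.engine.* configuration key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.ipc.RPC;

    public class ProtobufRpcEngine2Sketch {
      // Hypothetical protocol; the real tests use TestRpcBase.TestRpcService.
      @ProtocolInfo(protocolName = "sketch.DemoProtocol", protocolVersion = 1)
      public interface DemoProtocol {
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // This is the whole migration for most of the tests below: proxies and
        // servers built from this conf are served by ProtobufRpcEngine2.
        RPC.setProtocolEngine(conf, DemoProtocol.class, ProtobufRpcEngine2.class);
        System.out.println(conf.get("rpc.engine." + DemoProtocol.class.getName()));
      }
    }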
- RPC.setProtocolEngine(conf, NewerRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, NewerRpcService.class, + ProtobufRpcEngine2.class); NewerRpcService newProxy = RPC.getProxy(NewerRpcService.class, 0, addr, conf); newProxy.echo(null, emptyRequest); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java index facb8fdd8b191..06c3646310412 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java @@ -94,8 +94,9 @@ public void setUp() throws IOException { // Setup server for both protocols conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024); conf.setBoolean(CommonConfigurationKeys.IPC_SERVER_LOG_SLOW_RPC, true); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); - RPC.setProtocolEngine(conf, TestRpcService2.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); + RPC.setProtocolEngine(conf, TestRpcService2.class, + ProtobufRpcEngine2.class); // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java index 32300d4f876e1..922e9192c41c6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java @@ -52,7 +52,7 @@ public void test() throws Exception { TestProtobufRpcHandoffProto.newReflectiveBlockingService(serverImpl); RPC.setProtocolEngine(conf, TestProtoBufRpcServerHandoffProtocol.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); RPC.Server server = new RPC.Builder(conf) .setProtocol(TestProtoBufRpcServerHandoffProtocol.class) .setInstance(blockingService) @@ -144,8 +144,8 @@ public static class TestProtoBufRpcServerHandoffServer TestProtos.SleepRequestProto2 request) throws ServiceException { final long startTime = System.currentTimeMillis(); - final ProtobufRpcEngineCallback callback = - ProtobufRpcEngine.Server.registerForDeferredResponse(); + final ProtobufRpcEngineCallback2 callback = + ProtobufRpcEngine2.Server.registerForDeferredResponse(); final long sleepTime = request.getSleepTime(); new Thread() { @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java index 640ca3d2b89ed..9fbb865c6e516 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.Server.Call; import org.apache.hadoop.ipc.Server.Connection; +import org.apache.hadoop.ipc.metrics.RpcMetrics; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto; import 
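TestProtoBufRpcServerHandoff above now obtains a ProtobufRpcEngineCallback2 from ProtobufRpcEngine2.Server.registerForDeferredResponse(), which lets a service method return immediately and finish the call from another thread. A sketch of that handoff, assuming the callback is completed with setResponse(...) or failed with error(...); the sleeping thread and canned response are placeholders:

    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.ProtobufRpcEngineCallback2;
    import org.apache.hadoop.thirdparty.protobuf.Message;

    public class DeferredResponseSketch {
      // Meant to be called from inside a protobuf service method running on an
      // RPC handler thread; the current call is then marked as deferred.
      static void handleSlowCall(final Message cannedResponse) {
        final ProtobufRpcEngineCallback2 callback =
            ProtobufRpcEngine2.Server.registerForDeferredResponse();

        new Thread(() -> {
          try {
            Thread.sleep(1000);                    // simulate slow work elsewhere
            callback.setResponse(cannedResponse);  // completes the deferred call
          } catch (Exception e) {
            callback.error(e);                     // or fail it
          }
        }).start();
      }
    }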
org.apache.hadoop.ipc.protobuf.TestProtos; @@ -81,6 +82,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -1095,7 +1097,9 @@ public TestRpcService run() { proxy.lockAndSleep(null, newSleepRequest(5)); rpcMetrics = getMetrics(server.getRpcMetrics().name()); - assertGauge("RpcLockWaitTimeAvgTime", 10000.0, rpcMetrics); + assertGauge("RpcLockWaitTimeAvgTime", + (double)(RpcMetrics.TIMEUNIT.convert(10L, TimeUnit.SECONDS)), + rpcMetrics); } finally { if (proxy2 != null) { RPC.stopProxy(proxy2); @@ -1290,6 +1294,43 @@ public void testDecayRpcSchedulerMetrics() throws Exception { } } + @Test (timeout=30000) + public void testProtocolUserPriority() throws Exception { + final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0"; + conf.set(CLIENT_PRINCIPAL_KEY, "clientForProtocol"); + Server server = null; + try { + server = setupDecayRpcSchedulerandTestServer(ns + "."); + + UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user"); + // normal users start with priority 0. + Assert.assertEquals(0, server.getPriorityLevel(ugi)); + // calls for a protocol defined client will have priority of 0. + Assert.assertEquals(0, server.getPriorityLevel(newSchedulable(ugi))); + + // protocol defined client will have top priority of -1. + ugi = UserGroupInformation.createRemoteUser("clientForProtocol"); + Assert.assertEquals(-1, server.getPriorityLevel(ugi)); + // calls for a protocol defined client will have priority of 0. + Assert.assertEquals(0, server.getPriorityLevel(newSchedulable(ugi))); + } finally { + stop(server, null); + } + } + + private static Schedulable newSchedulable(UserGroupInformation ugi) { + return new Schedulable(){ + @Override + public UserGroupInformation getUserGroupInformation() { + return ugi; + } + @Override + public int getPriorityLevel() { + return 0; // doesn't matter. 
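In TestRPC above, the RpcLockWaitTimeAvgTime assertion stops hard-coding 10000.0 and derives the expected gauge from RpcMetrics.TIMEUNIT, so the test stays correct if the metrics time unit ever changes. The conversion, worked through on its own; nothing here beyond JDK TimeUnit semantics and the RpcMetrics.TIMEUNIT constant the test already uses:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.ipc.metrics.RpcMetrics;

    public class RpcMetricsTimeUnitSketch {
      public static void main(String[] args) {
        // Ten seconds of lock wait, expressed in whatever unit RpcMetrics reports.
        double expected = (double) RpcMetrics.TIMEUNIT.convert(10L, TimeUnit.SECONDS);
        System.out.println("expected RpcLockWaitTimeAvgTime: " + expected);

        // The old literal was only correct for a millisecond time unit:
        System.out.println(TimeUnit.MILLISECONDS.convert(10L, TimeUnit.SECONDS)); // 10000
      }
    }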
+ } + }; + } + private Server setupDecayRpcSchedulerandTestServer(String ns) throws Exception { final int queueSizePerHandler = 3; @@ -1550,6 +1591,18 @@ public RpcStatusProto getRpcStatusProto() { } } + @Test + public void testSetProtocolEngine() { + Configuration conf = new Configuration(); + RPC.setProtocolEngine(conf, StoppedProtocol.class, StoppedRpcEngine.class); + RpcEngine rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf); + assertTrue(rpcEngine instanceof StoppedRpcEngine); + + RPC.setProtocolEngine(conf, StoppedProtocol.class, ProtobufRpcEngine.class); + rpcEngine = RPC.getProtocolEngine(StoppedProtocol.class, conf); + assertTrue(rpcEngine instanceof StoppedRpcEngine); + } + public static void main(String[] args) throws Exception { new TestRPC().testCallsInternal(conf); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java index ffee086fa9801..22fdcbbe14e65 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java @@ -114,19 +114,19 @@ public void setUp() { ProtocolSignature.resetCache(); RPC.setProtocolEngine(conf, - TestProtocol0.class, ProtobufRpcEngine.class); + TestProtocol0.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol1.class, ProtobufRpcEngine.class); + TestProtocol1.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol2.class, ProtobufRpcEngine.class); + TestProtocol2.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol3.class, ProtobufRpcEngine.class); + TestProtocol3.class, ProtobufRpcEngine2.class); RPC.setProtocolEngine(conf, - TestProtocol4.class, ProtobufRpcEngine.class); + TestProtocol4.class, ProtobufRpcEngine2.class); } @After diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java index d810fe3c5a1e0..90973d2674c01 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java @@ -44,7 +44,7 @@ public class TestRPCWaitForProxy extends TestRpcBase { @Before public void setupProtocolEngine() { RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java index 2729dc3cd9daa..65558a7980a2d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java @@ -26,7 +26,6 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.TestConnectionRetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; -import org.apache.hadoop.ipc.TestRpcBase.TestRpcService; import org.junit.Before; import org.junit.Test; @@ -129,7 +128,7 @@ private void verifyRetryPolicyReuseConnections( try { proxy1 = getClient(addr, newConf, retryPolicy1); 
proxy1.ping(null, newEmptyRequest()); - client = ProtobufRpcEngine.getClient(newConf); + client = ProtobufRpcEngine2.getClient(newConf); final Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java index bf24d680dde2e..0962b50099c57 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java @@ -62,6 +62,8 @@ public class TestRpcBase { protected final static String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal"; + protected final static String CLIENT_PRINCIPAL_KEY = + "test.ipc.client.principal"; protected final static String ADDRESS = "0.0.0.0"; protected final static int PORT = 0; protected static InetSocketAddress addr; @@ -70,7 +72,7 @@ public class TestRpcBase { protected void setupConf() { conf = new Configuration(); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); } @@ -271,7 +273,8 @@ public Token selectToken(Text service, } } - @KerberosInfo(serverPrincipal = SERVER_PRINCIPAL_KEY) + @KerberosInfo(serverPrincipal = SERVER_PRINCIPAL_KEY, + clientPrincipal = CLIENT_PRINCIPAL_KEY) @TokenInfo(TestTokenSelector.class) @ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRpcBase$TestRpcService", protocolVersion = 1) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 72f73822b6fd0..72085a19ec711 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -169,7 +169,7 @@ public void setup() { clientFallBackToSimpleAllowed = true; // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); } static String getQOPNames (QualityOfProtection[] qops){ @@ -356,7 +356,7 @@ public void testPerConnectionConf() throws Exception { newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]); proxy1 = getClient(addr, newConf); proxy1.getAuthMethod(null, newEmptyRequest()); - client = ProtobufRpcEngine.getClient(newConf); + client = ProtobufRpcEngine2.getClient(newConf); Set conns = client.getConnectionIds(); assertEquals("number of connections in cache is wrong", 1, conns.size()); // same conf, connection should be re-used @@ -533,13 +533,16 @@ public void handle(Callback[] callbacks) } private static Pattern BadToken = - Pattern.compile(".*DIGEST-MD5: digest response format violation.*"); + Pattern.compile("^" + RemoteException.class.getName() + + "\\("+ SaslException.class.getName() + "\\): " + + "DIGEST-MD5: digest response format violation.*"); private static Pattern KrbFailed = Pattern.compile(".*Failed on local exception:.* " + "Failed to specify server's Kerberos principal name.*"); private static Pattern Denied(AuthMethod method) { - return 
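TestRpcBase now adds a clientPrincipal to the protocol's KerberosInfo annotation, which is what the new testProtocolUserPriority earlier in this patch leans on: a caller whose name matches the configured client principal is treated as the protocol's own client and scheduled ahead of ordinary users. A sketch of such an annotated protocol; the interface name, keys, and principal value are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.security.KerberosInfo;

    public class ClientPrincipalSketch {
      static final String SERVER_PRINCIPAL_KEY = "sketch.server.principal";
      static final String CLIENT_PRINCIPAL_KEY = "sketch.client.principal";

      // The annotation names configuration keys, not literal principals; the
      // server resolves them against its Configuration at runtime.
      @KerberosInfo(serverPrincipal = SERVER_PRINCIPAL_KEY,
          clientPrincipal = CLIENT_PRINCIPAL_KEY)
      @ProtocolInfo(protocolName = "sketch.DemoProtocol", protocolVersion = 1)
      public interface DemoProtocol {
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With the DecayRpcScheduler used in testProtocolUserPriority, a user
        // named "clientForProtocol" then gets the top priority level (-1).
        conf.set(CLIENT_PRINCIPAL_KEY, "clientForProtocol");
        System.out.println(conf.get(CLIENT_PRINCIPAL_KEY));
      }
    }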
Pattern.compile(".*RemoteException.*AccessControlException.*: " - + method + " authentication is not enabled.*"); + return Pattern.compile("^" + RemoteException.class.getName() + + "\\(" + AccessControlException.class.getName() + "\\): " + + method + " authentication is not enabled.*"); } private static Pattern No(AuthMethod ... method) { String methods = StringUtils.join(method, ",\\s*"); @@ -547,10 +550,10 @@ private static Pattern No(AuthMethod ... method) { "Client cannot authenticate via:\\[" + methods + "\\].*"); } private static Pattern NoTokenAuth = - Pattern.compile(".*IllegalArgumentException: " + + Pattern.compile("^" + IllegalArgumentException.class.getName() + ": " + "TOKEN authentication requires a secret manager"); private static Pattern NoFallback = - Pattern.compile(".*Failed on local exception:.* " + + Pattern.compile("^" + AccessControlException.class.getName() + ":.* " + "Server asks us to fall back to SIMPLE auth, " + "but this client is configured to only allow secure connections.*"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java index 5d52cad66bb90..786571441fd1b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/MetricsRecords.java @@ -18,8 +18,8 @@ package org.apache.hadoop.metrics2.impl; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; +import java.util.function.Predicate; +import java.util.stream.StreamSupport; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsTag; @@ -65,16 +65,22 @@ public static void assertMetricNotNull(MetricsRecord record, resourceLimitMetric); } - private static MetricsTag getFirstTagByName(MetricsRecord record, String name) { - return Iterables.getFirst(Iterables.filter(record.tags(), - new MetricsTagPredicate(name)), null); + private static MetricsTag getFirstTagByName(MetricsRecord record, + String name) { + if (record.tags() == null) { + return null; + } + return record.tags().stream().filter( + new MetricsTagPredicate(name)).findFirst().orElse(null); } private static AbstractMetric getFirstMetricByName( MetricsRecord record, String name) { - return Iterables.getFirst( - Iterables.filter(record.metrics(), new AbstractMetricPredicate(name)), - null); + if (record.metrics() == null) { + return null; + } + return StreamSupport.stream(record.metrics().spliterator(), false) + .filter(new AbstractMetricPredicate(name)).findFirst().orElse(null); } private static class MetricsTagPredicate implements Predicate { @@ -86,7 +92,7 @@ public MetricsTagPredicate(String tagName) { } @Override - public boolean apply(MetricsTag input) { + public boolean test(MetricsTag input) { return input.name().equals(tagName); } } @@ -101,7 +107,7 @@ public AbstractMetricPredicate( } @Override - public boolean apply(AbstractMetric input) { + public boolean test(AbstractMetric input) { return input.name().equals(metricName); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java index b53be4d73599a..2ca1c8ad2cc35 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java @@ -133,6 +133,22 @@ private void testInstances(MetricsConfig c) throws Exception { assertEq(expected, mc2); } + /** + * Test the config value separated by delimiter + */ + @Test public void testDelimiterConf() { + String filename = getTestFilename("test-metrics2-delimiter"); + new ConfigBuilder().add("p1.foo", "p1foo1,p1foo2,p1foo3").save(filename); + + MetricsConfig mc = MetricsConfig.create("p1", filename); + Configuration expected = new ConfigBuilder() + .add("foo", "p1foo1") + .add("foo", "p1foo2") + .add("foo", "p1foo3") + .config; + assertEq(expected, mc); + } + /** * Return a test filename in the class path * @param basename diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java index 3fdf445d66447..0dabe468e49e3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java @@ -28,7 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java index 47a3b4cdc092b..5a1f1d1376d4a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java @@ -23,9 +23,7 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; - -import javax.annotation.Nullable; - +import java.util.stream.StreamSupport; import org.junit.Test; import org.junit.runner.RunWith; @@ -38,9 +36,8 @@ import static org.junit.Assert.*; import static org.mockito.Mockito.*; -import com.google.common.base.Predicate; -import com.google.common.base.Supplier; -import com.google.common.collect.Iterables; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.metrics2.MetricsException; @@ -59,7 +56,6 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -246,13 +242,9 @@ public void run() { for (Thread t : threads) t.join(); assertEquals(0L, ms.droppedPubAll.value()); - assertTrue(StringUtils.join("\n", Arrays.asList(results)), - Iterables.all(Arrays.asList(results), new 
Predicate() { - @Override - public boolean apply(@Nullable String input) { - return input.equalsIgnoreCase("Passed"); - } - })); + assertTrue(String.join("\n", Arrays.asList(results)), + Arrays.asList(results).stream().allMatch( + input -> input.equalsIgnoreCase("Passed"))); ms.stop(); ms.shutdown(); } @@ -482,14 +474,12 @@ public Object answer(InvocationOnMock invocation) throws Throwable { ms.onTimerEvent(); verify(dataSink, timeout(500).times(2)).putMetrics(r1.capture()); List mr = r1.getAllValues(); - Number qSize = Iterables.find(mr.get(1).metrics(), - new Predicate() { - @Override - public boolean apply(@Nullable AbstractMetric input) { - assert input != null; - return input.name().equals("Sink_slowSinkQsize"); - } - }).value(); + Number qSize = StreamSupport.stream(mr.get(1).metrics().spliterator(), + false).filter( + input -> { + assert input != null; + return input.name().equals("Sink_slowSinkQsize"); + }).findFirst().get().value(); assertEquals(1, qSize); } finally { proceedSignal.countDown(); @@ -639,4 +629,25 @@ private static class TestSource2 { private static String getPluginUrlsAsString() { return "file:metrics2-test-plugin.jar"; } + + @Test + public void testMetricSystemRestart() { + MetricsSystemImpl ms = new MetricsSystemImpl("msRestartTestSystem"); + TestSink ts = new TestSink(); + String sinkName = "restartTestSink"; + + try { + ms.start(); + ms.register(sinkName, "", ts); + assertNotNull("no adapter exists for " + sinkName, + ms.getSinkAdapter(sinkName)); + ms.stop(); + + ms.start(); + assertNotNull("no adapter exists for " + sinkName, + ms.getSinkAdapter(sinkName)); + } finally { + ms.stop(); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java index 9bfdd73bee832..ad90c1860514a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRollingAverages.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.metrics2.lib; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java index aa9370933722f..3e3bdb7b413b1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/MockDomainNameResolver.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.TreeMap; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This mock resolver class returns the predefined resolving/reverse lookup @@ -39,8 +39,8 @@ public class MockDomainNameResolver implements DomainNameResolver { public static final byte[] BYTE_ADDR_2 = new byte[]{10, 1, 1, 2}; public static final String ADDR_1 = "10.1.1.1"; public static final String ADDR_2 = "10.1.1.2"; - public static final String FQDN_1 = "host01.com"; - public static final String 
FQDN_2 = "host02.com"; + public static final String FQDN_1 = "host01.test"; + public static final String FQDN_2 = "host02.test"; /** Internal mapping of domain names and IP addresses. */ private Map addrs = new TreeMap<>(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java index 80f2ebc98ced8..872791d1ff7bf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java @@ -22,6 +22,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.net.InetAddress; import java.net.ServerSocket; import java.util.Random; @@ -49,7 +50,8 @@ public static int getPort(int port, int retries) throws IOException { if (tryPort == 0) { continue; } - try (ServerSocket s = new ServerSocket(tryPort)) { + try (ServerSocket s = new ServerSocket(tryPort, 50, + InetAddress.getLoopbackAddress())) { LOG.info("Using port " + tryPort); return tryPort; } catch (IOException e) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index b11b1e96ded59..76284932c43de 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -95,7 +95,25 @@ public void testAvoidLoopbackTcpSockets() throws Throwable { assertInException(se, "Invalid argument"); } } - + + @Test + public void testInvalidAddress() throws Throwable { + Configuration conf = new Configuration(); + + Socket socket = NetUtils.getDefaultSocketFactory(conf) + .createSocket(); + socket.bind(new InetSocketAddress("127.0.0.1", 0)); + try { + NetUtils.connect(socket, + new InetSocketAddress("invalid-test-host", + 0), 20000); + socket.close(); + fail("Should not have connected"); + } catch (UnknownHostException uhe) { + LOG.info("Got exception: ", uhe); + } + } + @Test public void testSocketReadTimeoutWithChannel() throws Exception { doSocketReadTimeoutTest(true); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java index 4b8b02f0171d4..86870e1257119 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java @@ -21,8 +21,8 @@ import static org.junit.Assert.assertEquals; -import com.google.common.base.Charsets; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; import java.io.File; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index c0d204f86aa82..466c83eb58212 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -49,7 
+49,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; public class TestDomainSocket { private static TemporarySocketDirectory sockDir; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java index aa522f266014b..ca801dac2c247 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java @@ -31,7 +31,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index c86b9ae344195..edd537011c4a8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRpcBase; @@ -151,7 +151,7 @@ public void testRealUserSetup() throws IOException { configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -181,7 +181,7 @@ public void testRealUserAuthorizationSuccess() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -215,7 +215,7 @@ public void testRealUserIPAuthorizationFailure() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5); @@ -251,7 +251,7 @@ public void testRealUserIPNotSpecified() throws IOException { conf.setStrings(DefaultImpersonationProvider.getTestProvider(). 
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -286,7 +286,7 @@ public void testRealUserGroupNotSpecified() throws IOException { final Configuration conf = new Configuration(); configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -322,7 +322,7 @@ public void testRealUserGroupAuthorizationFailure() throws IOException { getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group3"); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 2); @@ -363,7 +363,7 @@ public void testProxyWithToken() throws Exception { TestTokenSecretManager sm = new TestTokenSecretManager(); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); final Server server = setupTestServer(conf, 5, sm); @@ -411,7 +411,7 @@ public void testTokenBySuperUser() throws Exception { SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(newConf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(newConf); final Server server = setupTestServer(newConf, 5, sm); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 46e9f92258502..ebff93d50d5e1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -34,7 +34,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java index dba2b086f897f..aba3997187747 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java @@ -389,8 +389,9 @@ public void run() { fail("The LDAP query should have timed out!"); } catch (NamingException ne) { LOG.debug("Got the exception while LDAP querying: ", ne); - assertExceptionContains("LDAP response read timed out, timeout used:" + - connectionTimeoutMs + "ms", ne); + assertExceptionContains("LDAP response read timed out, timeout used", + ne); + assertExceptionContains("" + connectionTimeoutMs, ne); assertFalse(ne.getMessage().contains("remaining name")); } finally { finLatch.countDown(); @@ -444,8 +445,9 @@ public 
void run() { fail("The LDAP query should have timed out!"); } catch (NamingException ne) { LOG.debug("Got the exception while LDAP querying: ", ne); - assertExceptionContains("LDAP response read timed out, timeout used:" + - readTimeoutMs + "ms", ne); + assertExceptionContains("LDAP response read timed out, timeout used", + ne); + assertExceptionContains(""+ readTimeoutMs, ne); assertExceptionContains("remaining name", ne); } finally { finLatch.countDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithBindUserSwitch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithBindUserSwitch.java index 4f080b3fede96..3b4c77d9e4ab9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithBindUserSwitch.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithBindUserSwitch.java @@ -18,7 +18,7 @@ package org.apache.hadoop.security; -import com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.alias.CredentialProvider; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java index 016c589ae3a24..9dcc4ca3fcebd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java @@ -46,7 +46,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; public class TestSecurityUtil { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java index e6fdc2bcdfbd4..939209d267b50 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java @@ -32,8 +32,8 @@ import org.apache.hadoop.security.ShellBasedIdMapping.StaticMapping; import org.junit.Test; -import com.google.common.collect.BiMap; -import com.google.common.collect.HashBiMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.BiMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashBiMap; import static org.assertj.core.api.Assertions.assertThat; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java index 8ede451db964c..db0095f2171e2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java @@ -23,6 +23,7 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import 
org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -101,12 +102,35 @@ public void stopMiniKdc() { } } + /** + * Login from keytab using the MiniKDC. + */ + @Test + public void testUGILoginFromKeytab() throws Exception { + long beforeLogin = Time.now(); + String principal = "foo"; + File keytab = new File(workDir, "foo.keytab"); + kdc.createPrincipal(keytab, principal); + + UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath()); + UserGroupInformation ugi = UserGroupInformation.getLoginUser(); + Assert.assertTrue("UGI should be configured to login from keytab", + ugi.isFromKeytab()); + + User user = getUser(ugi.getSubject()); + Assert.assertNotNull(user.getLogin()); + + Assert.assertTrue("User login time is less than before login time, " + + "beforeLoginTime:" + beforeLogin + " userLoginTime:" + user.getLastLogin(), + user.getLastLogin() > beforeLogin); + } + /** * Login from keytab using the MiniKDC and verify the UGI can successfully * relogin from keytab as well. This will catch regressions like HADOOP-10786. */ @Test - public void testUGILoginFromKeytab() throws Exception { + public void testUGIReLoginFromKeytab() throws Exception { String principal = "foo"; File keytab = new File(workDir, "foo.keytab"); kdc.createPrincipal(keytab, principal); @@ -122,6 +146,9 @@ public void testUGILoginFromKeytab() throws Exception { final LoginContext login1 = user.getLogin(); Assert.assertNotNull(login1); + // Sleep for 2 secs to have a difference between first and second login + Thread.sleep(2000); + ugi.reloginFromKeytab(); final long secondLogin = user.getLastLogin(); final LoginContext login2 = user.getLogin(); @@ -131,6 +158,42 @@ public void testUGILoginFromKeytab() throws Exception { Assert.assertNotSame(login1, login2); } + /** + * Force re-login from keytab using the MiniKDC and verify the UGI can + * successfully relogin from keytab as well. + */ + @Test + public void testUGIForceReLoginFromKeytab() throws Exception { + // Set this to false as we are testing force re-login anyways + UserGroupInformation.setShouldRenewImmediatelyForTests(false); + String principal = "foo"; + File keytab = new File(workDir, "foo.keytab"); + kdc.createPrincipal(keytab, principal); + + UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath()); + UserGroupInformation ugi = UserGroupInformation.getLoginUser(); + Assert.assertTrue("UGI should be configured to login from keytab", + ugi.isFromKeytab()); + + // Verify relogin from keytab. 
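The keytab tests added here exercise UserGroupInformation.loginUserFromKeytab(), reloginFromKeytab() and the new forceReloginFromKeytab() check. A minimal sketch of that login/re-login pattern outside the MiniKDC harness, assuming a hypothetical principal and keytab path:

    import java.io.File;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical values; in the tests these come from the MiniKDC.
        String principal = "foo";
        File keytab = new File("/tmp/foo.keytab");

        UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        System.out.println("login from keytab: " + ugi.isFromKeytab());

        // reloginFromKeytab() re-logs in only when the ticket actually needs it;
        // forceReloginFromKeytab() does so unconditionally, which is what the new test verifies.
        ugi.reloginFromKeytab();
        ugi.forceReloginFromKeytab();
      }
    }
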
+ User user = getUser(ugi.getSubject()); + final long firstLogin = user.getLastLogin(); + final LoginContext login1 = user.getLogin(); + Assert.assertNotNull(login1); + + // Sleep for 2 secs to have a difference between first and second login + Thread.sleep(2000); + + // Force relogin from keytab + ugi.forceReloginFromKeytab(); + final long secondLogin = user.getLastLogin(); + final LoginContext login2 = user.getLogin(); + Assert.assertTrue("User should have been able to relogin from keytab", + secondLogin > firstLogin); + Assert.assertNotNull(login2); + Assert.assertNotSame(login1, login2); + } + @Test public void testGetUGIFromKnownSubject() throws Exception { KerberosPrincipal principal = new KerberosPrincipal("user"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java index 9061fe752c88e..ab9de2d308ac0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java @@ -21,6 +21,8 @@ import static org.junit.Assert.fail; import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.security.SecureRandom; import java.util.Arrays; import java.util.Collection; @@ -370,7 +372,7 @@ public void testNullIpAddress() throws Exception { PROXY_USER_NAME, realUserUgi, GROUP_NAMES); // remote address is null - ProxyUsers.authorize(proxyUserUgi, null); + ProxyUsers.authorize(proxyUserUgi, (InetAddress) null); } @Test @@ -533,9 +535,21 @@ public void testNoHostsForUsers() throws Exception { assertNotAuthorized(proxyUserUgi, "1.2.3.4"); } + private static InetAddress toFakeAddress(String ip) { + try { + InetAddress addr = InetAddress.getByName(ip); + return InetAddress.getByAddress(ip.replace('.', '-'), + addr.getAddress()); + } catch (UnknownHostException e) { + throw new IllegalArgumentException(e); + } + } + private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) { try { + // test both APIs. ProxyUsers.authorize(proxyUgi, host); + ProxyUsers.authorize(proxyUgi, toFakeAddress(host)); fail("Allowed authorization of " + proxyUgi + " from " + host); } catch (AuthorizationException e) { // Expected @@ -544,7 +558,9 @@ private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) { private void assertAuthorized(UserGroupInformation proxyUgi, String host) { try { + // test both APIs. ProxyUsers.authorize(proxyUgi, host); + ProxyUsers.authorize(proxyUgi, toFakeAddress(host)); } catch (AuthorizationException e) { fail("Did not allow authorization of " + proxyUgi + " from " + host); } @@ -560,9 +576,9 @@ public void init(String configurationPrefix) { * Authorize a user (superuser) to impersonate another user (user1) if the * superuser belongs to the group "sudo_user1" . 
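The updated TestProxyUsers drives the new ProxyUsers.authorize(UserGroupInformation, InetAddress) overload with addresses that already carry a synthetic host name, so no reverse DNS lookup can interfere with the test. A standalone sketch of that trick, mirroring the toFakeAddress() helper above (class name and sample IP are illustrative):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class FakeAddressSketch {
      /** Build an InetAddress pre-"resolved" to a synthetic host name; no DNS involved. */
      static InetAddress toFakeAddress(String ip) throws UnknownHostException {
        InetAddress addr = InetAddress.getByName(ip);          // parses the literal IP, no lookup
        return InetAddress.getByAddress(ip.replace('.', '-'),  // synthetic host name
            addr.getAddress());
      }

      public static void main(String[] args) throws UnknownHostException {
        InetAddress a = toFakeAddress("1.2.3.4");
        System.out.println(a.getHostName() + " / " + a.getHostAddress()); // 1-2-3-4 / 1.2.3.4
      }
    }
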
*/ - - public void authorize(UserGroupInformation user, - String remoteAddress) throws AuthorizationException{ + @Override + public void authorize(UserGroupInformation user, + InetAddress remoteAddress) throws AuthorizationException{ UserGroupInformation superUser = user.getRealUser(); String sudoGroupName = "sudo_" + user.getShortUserName(); @@ -572,6 +588,7 @@ public void authorize(UserGroupInformation user, } } + @Override public void setConf(Configuration conf) { @@ -597,7 +614,6 @@ public static void loadTest(String ipString, int testRange) { ); ProxyUsers.refreshSuperUserGroupsConfiguration(conf); - // First try proxying a group that's allowed UserGroupInformation realUserUgi = UserGroupInformation .createRemoteUser(REAL_USER_NAME); @@ -608,7 +624,8 @@ public static void loadTest(String ipString, int testRange) { SecureRandom sr = new SecureRandom(); for (int i=1; i < 1000000; i++){ try { - ProxyUsers.authorize(proxyUserUgi, "1.2.3."+ sr.nextInt(testRange)); + ProxyUsers.authorize(proxyUserUgi, + toFakeAddress("1.2.3."+ sr.nextInt(testRange))); } catch (AuthorizationException e) { } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 3fb203ee2b93b..441f552649298 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -21,7 +21,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index c9571ff21e847..99922209c1102 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -24,7 +24,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.curator.RetryPolicy; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AssertExtensions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AssertExtensions.java new file mode 100644 index 0000000000000..8c5e553f71ee3 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/AssertExtensions.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.test; + +import java.util.concurrent.Callable; + +import org.assertj.core.description.Description; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extra classes to work with AssertJ. + * These are kept separate from {@link LambdaTestUtils} so there's + * no requirement for AssertJ to be on the classpath in that broadly + * used class. + */ +public final class AssertExtensions { + + private static final Logger LOG = + LoggerFactory.getLogger(AssertExtensions.class); + + private AssertExtensions() { + } + + /** + * A description for AssertJ "describedAs" clauses which evaluates the + * lambda-expression only on failure. That must return a string + * or null/"" to be skipped. + * @param eval lambda expression to invoke + * @return a description for AssertJ + */ + public static Description dynamicDescription(Callable eval) { + return new DynamicDescription(eval); + } + + private static final class DynamicDescription extends Description { + private final Callable eval; + + private DynamicDescription(final Callable eval) { + this.eval = eval; + } + + @Override + public String value() { + try { + return eval.call(); + } catch (Exception e) { + LOG.warn("Failed to evaluate description: " + e); + LOG.debug("Evaluation failure", e); + // return null so that the description evaluation chain + // will skip this one + return null; + } + } + } + + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 9e91634873607..e266f285685ac 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -32,12 +32,14 @@ import java.lang.reflect.InvocationTargetException; import java.util.Arrays; import java.util.Locale; +import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.Enumeration; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; import java.util.regex.Pattern; import org.apache.commons.io.IOUtils; @@ -60,9 +62,8 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Joiner; -import com.google.common.base.Supplier; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Test provides some very generic helpers which might be used across the tests @@ -88,7 +89,8 @@ public abstract class GenericTestUtils { public static final String DEFAULT_TEST_DATA_PATH = "target/test/data/"; /** - * Error string used in {@link 
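The new AssertExtensions.dynamicDescription() wraps a lambda in an AssertJ Description that is evaluated only if the assertion fails, so expensive diagnostic strings are never built on the happy path. A minimal usage sketch; the asserted value and message are hypothetical:

    import static org.apache.hadoop.test.AssertExtensions.dynamicDescription;
    import static org.assertj.core.api.Assertions.assertThat;

    public class DynamicDescriptionSketch {
      public static void main(String[] args) {
        long bytesRead = 42;
        assertThat(bytesRead)
            .describedAs(dynamicDescription(() ->
                "diagnostics built only on failure: bytesRead=" + bytesRead))
            .isEqualTo(42);
      }
    }
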
GenericTestUtils#waitFor(Supplier, int, int)}. + * Error string used in + * {@link GenericTestUtils#waitFor(Supplier, long, long)}. */ public static final String ERROR_MISSING_ARGUMENT = "Input supplier interface should be initailized"; @@ -227,6 +229,22 @@ public static int uniqueSequenceId() { return sequence.incrementAndGet(); } + /** + * Creates a directory for the data/logs of the unit test. + * It first deletes the directory if it exists. + * + * @param testClass the unit test class. + * @return the Path of the root directory. + */ + public static File setupTestRootDir(Class testClass) { + File testRootDir = getTestDir(testClass.getSimpleName()); + if (testRootDir.exists()) { + FileUtil.fullyDelete(testRootDir); + } + testRootDir.mkdirs(); + return testRootDir; + } + /** * Get the (created) base directory for tests. * @return the absolute directory @@ -380,9 +398,7 @@ public static void assertExceptionContains(String expectedText, public static void waitFor(final Supplier check, final long checkEveryMillis, final long waitForMillis) throws TimeoutException, InterruptedException { - if (check == null) { - throw new NullPointerException(ERROR_MISSING_ARGUMENT); - } + Objects.requireNonNull(check, ERROR_MISSING_ARGUMENT); if (waitForMillis < checkEveryMillis) { throw new IllegalArgumentException(ERROR_INVALID_ARGUMENT); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java index ad265afc3a022..3e0d31dc6a150 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.test; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java index 0cb5288e881ff..eb8d938994735 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java @@ -18,7 +18,7 @@ package org.apache.hadoop.test; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import org.junit.Assert; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java index fb7bd22fedfc9..8489e3d24f368 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java @@ -23,7 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.event.Level; import static org.junit.Assert.assertEquals; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java 
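Many of the test changes in this patch replace Guava's com.google.common.base.Supplier with java.util.function.Supplier; GenericTestUtils.waitFor() keeps its polling shape and now takes the JDK interface. A minimal sketch of a waitFor() call with a lambda condition (the condition itself is hypothetical):

    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForSketch {
      public static void main(String[] args)
          throws TimeoutException, InterruptedException {
        AtomicBoolean done = new AtomicBoolean(false);
        new Thread(() -> done.set(true)).start();
        // Poll every 100 ms, give up after 10 s; done::get acts as a java.util.function.Supplier<Boolean>.
        GenericTestUtils.waitFor(done::get, 100, 10_000);
      }
    }
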
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java index d4599b0ecc2d7..d87da0ac301c7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tracing; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.htrace.core.Span; import org.apache.htrace.core.SpanId; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java index 478a29b3317b4..85d95738b5ef5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.util; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; @@ -132,6 +132,10 @@ private static void createJar(File dir, File jarFile) throws IOException { * @return path to the Jar containing the class. */ public static String getJar(Class klass) { + return getJar(klass, null); + } + + public static String getJar(Class klass, String testSubDir) { Preconditions.checkNotNull(klass, "klass"); ClassLoader loader = klass.getClassLoader(); if (loader != null) { @@ -154,15 +158,18 @@ else if ("file".equals(url.getProtocol())) { klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); File baseDir = new File(path); - File testDir = GenericTestUtils.getTestDir(); + File testDir = + testSubDir == null ? 
GenericTestUtils.getTestDir() + : GenericTestUtils.getTestDir(testSubDir); testDir = testDir.getAbsoluteFile(); if (!testDir.exists()) { testDir.mkdirs(); } - File tempJar = File.createTempFile("hadoop-", "", testDir); - tempJar = new File(tempJar.getAbsolutePath() + ".jar"); + File tempFile = File.createTempFile("hadoop-", "", testDir); + File tempJar = new File(tempFile.getAbsolutePath() + ".jar"); createJar(baseDir, tempJar); tempJar.deleteOnExit(); + tempFile.deleteOnExit(); return tempJar.getAbsolutePath(); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java index 570e54214ad54..0fb887676274a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java @@ -41,8 +41,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestApplicationClassLoader { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java index c8fd754666c9e..d6da2f86cc3c4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java @@ -26,7 +26,7 @@ import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestDirectBufferPool { final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java index 0dbfe3d6cdd36..592b0bdc4483e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java @@ -50,7 +50,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class TestGenericOptionsParser { File testDir; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestMachineList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestMachineList.java index d721c29530f17..4d26ac55e9cf8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestMachineList.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestMachineList.java @@ -25,9 +25,11 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;; import org.junit.Test; -import org.mockito.Mockito; public class 
TestMachineList { private static String IP_LIST = "10.119.103.110,10.119.103.112,10.119.103.114"; @@ -43,10 +45,40 @@ public class TestMachineList { private static String HOSTNAME_IP_CIDR_LIST = "host1,10.222.0.0/16,10.119.103.110,10.119.103.112,10.119.103.114,10.241.23.0/24,host4,"; + class TestAddressFactory extends MachineList.InetAddressFactory { + private Map cache = new HashMap<>(); + InetAddress put(String ip) throws UnknownHostException { + return put(ip, ip); + } + InetAddress put(String ip, String... hosts) throws UnknownHostException { + InetAddress addr = InetAddress.getByName(ip); + for (String host : hosts) { + addr = InetAddress.getByAddress(host, addr.getAddress()); + cache.put(host, addr); + // last host wins the PTR lookup. + cache.put(ip, addr); + } + return addr; + } + @Override + public InetAddress getByName(String host) throws UnknownHostException { + InetAddress addr = cache.get(host); + if (addr == null) { + if (!InetAddresses.isInetAddress(host)) { + throw new UnknownHostException(host); + } + // ip resolves to itself to fake being unresolvable. + addr = InetAddress.getByName(host); + addr = InetAddress.getByAddress(host, addr.getAddress()); + } + return addr; + } + } + @Test public void testWildCard() { //create MachineList with a list of of IPs - MachineList ml = new MachineList("*"); + MachineList ml = new MachineList("*", new TestAddressFactory()); //test for inclusion with any IP assertTrue(ml.includes("10.119.103.112")); @@ -56,7 +88,7 @@ public void testWildCard() { @Test public void testIPList() { //create MachineList with a list of of IPs - MachineList ml = new MachineList(IP_LIST); + MachineList ml = new MachineList(IP_LIST, new TestAddressFactory()); //test for inclusion with an known IP assertTrue(ml.includes("10.119.103.112")); @@ -68,7 +100,7 @@ public void testIPList() { @Test public void testIPListSpaces() { //create MachineList with a ip string which has duplicate ip and spaces - MachineList ml = new MachineList(IP_LIST_SPACES); + MachineList ml = new MachineList(IP_LIST_SPACES, new TestAddressFactory()); //test for inclusion with an known IP assertTrue(ml.includes("10.119.103.112")); @@ -79,42 +111,28 @@ public void testIPListSpaces() { @Test public void testStaticIPHostNameList()throws UnknownHostException { - //create MachineList with a list of of Hostnames - InetAddress addressHost1 = InetAddress.getByName("1.2.3.1"); - InetAddress addressHost4 = InetAddress.getByName("1.2.3.4"); - - MachineList.InetAddressFactory addressFactory = - Mockito.mock(MachineList.InetAddressFactory.class); - Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1); - Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4); + // create MachineList with a list of of Hostnames + TestAddressFactory addressFactory = new TestAddressFactory(); + addressFactory.put("1.2.3.1", "host1"); + addressFactory.put("1.2.3.4", "host4"); MachineList ml = new MachineList( StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory); - //test for inclusion with an known IP + // test for inclusion with an known IP assertTrue(ml.includes("1.2.3.4")); - //test for exclusion with an unknown IP + // test for exclusion with an unknown IP assertFalse(ml.includes("1.2.3.5")); } @Test public void testHostNames() throws UnknownHostException { - //create MachineList with a list of of Hostnames - InetAddress addressHost1 = InetAddress.getByName("1.2.3.1"); - InetAddress addressHost4 = InetAddress.getByName("1.2.3.4"); - InetAddress addressMockHost4 = 
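The rewritten TestMachineList injects a deterministic address factory instead of Mockito stubs; the public MachineList API itself takes a comma-separated mix of IPs, host names and CIDR ranges. A minimal sketch of that public usage, with sample entries:

    import org.apache.hadoop.util.MachineList;

    public class MachineListSketch {
      public static void main(String[] args) {
        // One literal IP plus one CIDR range, similar to the lists used in the test.
        MachineList ml = new MachineList("10.119.103.112,10.222.0.0/16");
        System.out.println(ml.includes("10.119.103.112")); // true, exact IP match
        System.out.println(ml.includes("10.222.1.2"));     // true, inside 10.222.0.0/16
        System.out.println(ml.includes("10.221.0.1"));     // false
      }
    }
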
Mockito.mock(InetAddress.class); - Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("differentName"); - - InetAddress addressMockHost5 = Mockito.mock(InetAddress.class); - Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5"); - - MachineList.InetAddressFactory addressFactory = - Mockito.mock(MachineList.InetAddressFactory.class); - Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4); - Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5); - Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1); - Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4); + // create MachineList with a list of of Hostnames + TestAddressFactory addressFactory = new TestAddressFactory(); + addressFactory.put("1.2.3.1", "host1"); + addressFactory.put("1.2.3.4", "host4", "differentname"); + addressFactory.put("1.2.3.5", "host5"); MachineList ml = new MachineList( StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory ); @@ -128,21 +146,11 @@ public void testHostNames() throws UnknownHostException { @Test public void testHostNamesReverserIpMatch() throws UnknownHostException { - //create MachineList with a list of of Hostnames - InetAddress addressHost1 = InetAddress.getByName("1.2.3.1"); - InetAddress addressHost4 = InetAddress.getByName("1.2.3.4"); - InetAddress addressMockHost4 = Mockito.mock(InetAddress.class); - Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("host4"); - - InetAddress addressMockHost5 = Mockito.mock(InetAddress.class); - Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5"); - - MachineList.InetAddressFactory addressFactory = - Mockito.mock(MachineList.InetAddressFactory.class); - Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4); - Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5); - Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1); - Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4); + // create MachineList with a list of of Hostnames + TestAddressFactory addressFactory = new TestAddressFactory(); + addressFactory.put("1.2.3.1", "host1"); + addressFactory.put("1.2.3.4", "host4"); + addressFactory.put("1.2.3.5", "host5"); MachineList ml = new MachineList( StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory ); @@ -157,7 +165,7 @@ public void testHostNamesReverserIpMatch() throws UnknownHostException { @Test public void testCIDRs() { //create MachineList with a list of of ip ranges specified in CIDR format - MachineList ml = new MachineList(CIDR_LIST); + MachineList ml = new MachineList(CIDR_LIST, new TestAddressFactory()); //test for inclusion/exclusion assertFalse(ml.includes("10.221.255.255")); @@ -181,16 +189,17 @@ public void testCIDRs() { @Test(expected = IllegalArgumentException.class) public void testNullIpAddress() { //create MachineList with a list of of ip ranges specified in CIDR format - MachineList ml = new MachineList(CIDR_LIST); + MachineList ml = new MachineList(CIDR_LIST, new TestAddressFactory()); //test for exclusion with a null IP - assertFalse(ml.includes(null)); + assertFalse(ml.includes((String) null)); + assertFalse(ml.includes((InetAddress) null)); } @Test public void testCIDRWith16bitmask() { //create MachineList with a list of of ip ranges specified in CIDR format - MachineList ml = new MachineList(CIDR_LIST1); + MachineList ml = new MachineList(CIDR_LIST1, new 
TestAddressFactory()); //test for inclusion/exclusion assertFalse(ml.includes("10.221.255.255")); @@ -209,7 +218,7 @@ public void testCIDRWith16bitmask() { @Test public void testCIDRWith8BitMask() { //create MachineList with a list of of ip ranges specified in CIDR format - MachineList ml = new MachineList(CIDR_LIST2); + MachineList ml = new MachineList(CIDR_LIST2, new TestAddressFactory()); //test for inclusion/exclusion assertFalse(ml.includes("10.241.22.255")); @@ -228,7 +237,7 @@ public void testCIDRWith8BitMask() { public void testInvalidCIDR() { //create MachineList with an Invalid CIDR try { - new MachineList(INVALID_CIDR); + MachineList ml = new MachineList(INVALID_CIDR, new TestAddressFactory()); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { //expected Exception @@ -240,7 +249,7 @@ public void testInvalidCIDR() { @Test public void testIPandCIDRs() { //create MachineList with a list of of ip ranges and ip addresses - MachineList ml = new MachineList(IP_CIDR_LIST); + MachineList ml = new MachineList(IP_CIDR_LIST, new TestAddressFactory()); //test for inclusion with an known IP assertTrue(ml.includes("10.119.103.112")); @@ -263,7 +272,8 @@ public void testIPandCIDRs() { @Test public void testHostNameIPandCIDRs() { //create MachineList with a mix of ip addresses , hostnames and ip ranges - MachineList ml = new MachineList(HOSTNAME_IP_CIDR_LIST); + MachineList ml = new MachineList(HOSTNAME_IP_CIDR_LIST, + new TestAddressFactory()); //test for inclusion with an known IP assertTrue(ml.includes("10.119.103.112")); @@ -286,7 +296,8 @@ public void testHostNameIPandCIDRs() { @Test public void testGetCollection() { //create MachineList with a mix of ip addresses , hostnames and ip ranges - MachineList ml = new MachineList(HOSTNAME_IP_CIDR_LIST); + MachineList ml = + new MachineList(HOSTNAME_IP_CIDR_LIST, new TestAddressFactory()); Collection col = ml.getCollection(); //test getCollectionton to return the full collection diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java index 58874fdcdfba6..98b75bba4793a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java @@ -21,8 +21,6 @@ import static org.junit.Assert.*; import org.apache.hadoop.crypto.OpensslCipher; -import org.apache.hadoop.io.compress.Lz4Codec; -import org.apache.hadoop.io.compress.SnappyCodec; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.util.NativeCodeLoader; import org.slf4j.Logger; @@ -52,13 +50,9 @@ public void testNativeCodeLoaded() { // library names are depended on platform and build envs // so just check names are available assertFalse(ZlibFactory.getLibraryName().isEmpty()); - if (NativeCodeLoader.buildSupportsSnappy()) { - assertFalse(SnappyCodec.getLibraryName().isEmpty()); - } if (NativeCodeLoader.buildSupportsOpenssl()) { assertFalse(OpensslCipher.getLibraryName().isEmpty()); } - assertFalse(Lz4Codec.getLibraryName().isEmpty()); LOG.info("TestNativeCodeLoader: libhadoop.so is loaded."); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java index 578d267114128..c9f398da563e2 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.util; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.commons.io.FileUtils; import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider; import org.junit.Assert; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java index 3d985e40fb878..93790eb1350a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java @@ -31,8 +31,8 @@ import org.apache.zookeeper.data.ACL; import org.junit.Test; -import com.google.common.base.Charsets; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; public class TestZKUtil { private static final String TEST_ROOT_DIR = GenericTestUtils.getTempPath( diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java index ca018a64c5e81..f43930dd07a5e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java @@ -33,8 +33,8 @@ import org.apache.hadoop.util.hash.Hash; import org.apache.log4j.Logger; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class BloomFilterCommonTester { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java index fbbb6d81935cd..cfd9628885d4f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java @@ -31,9 +31,9 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestBloomFilters { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/functional/TestRemoteIterators.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/functional/TestRemoteIterators.java new file mode 100644 index 0000000000000..8cd5c58585e6f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/functional/TestRemoteIterators.java @@ -0,0 +1,469 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.functional; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.util.functional.RemoteIterators.*; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Test for {@link RemoteIterators}. + * + */ +public class TestRemoteIterators extends AbstractHadoopTestBase { + + private static final Logger LOG = LoggerFactory.getLogger( + TestRemoteIterators.class); + + private static final String[] DATA = {"a", "b", "c"}; + + /** Counter for lambda-expressions. */ + private int counter; + + @Test + public void testIterateArray() throws Throwable { + verifyInvoked(remoteIteratorFromArray(DATA), DATA.length, + (s) -> LOG.info(s)); + } + + @Test + public void testIterateArrayMapped() throws Throwable { + verifyInvoked( + mappingRemoteIterator( + remoteIteratorFromArray(DATA), + (d) -> { + counter += d.length(); + return d; + }), + DATA.length, + this::log); + assertCounterValue(3); + } + + public void log(Object o) { + LOG.info("{}", o); + } + + /** + * Singleton is iterated through once. + * The toString() call is passed through. + */ + @Test + public void testSingleton() throws Throwable { + StringBuffer result = new StringBuffer(); + String name = "singleton"; + RemoteIterator it = remoteIteratorFromSingleton(name); + assertStringValueContains(it, "SingletonIterator"); + assertStringValueContains(it, name); + verifyInvoked( + it, + 1, + (s) -> result.append(s)); + assertThat(result.toString()) + .isEqualTo(name); + } + + @Test + public void testSingletonNotClosed() throws Throwable { + CloseCounter closeCounter = new CloseCounter(); + RemoteIterator it = remoteIteratorFromSingleton(closeCounter); + verifyInvoked(it, 1, this::log); + close(it); + closeCounter.assertCloseCount(0); + } + + /** + * A null singleton is not an error. 
+ */ + @Test + public void testNullSingleton() throws Throwable { + verifyInvoked(remoteIteratorFromSingleton(null), 0, this::log); + } + + + /** + * If you create a singleton iterator and it is an IOStatisticsSource, + * then that is the statistics which can be extracted from the + * iterator. + */ + @Test + public void testSingletonStats() throws Throwable { + IOStatsInstance singleton = new IOStatsInstance(); + RemoteIterator it + = remoteIteratorFromSingleton(singleton); + extractStatistics(it); + } + + /** + * The mapping remote iterator passes IOStatistics + * calls down. + */ + @Test + public void testMappedSingletonStats() throws Throwable { + IOStatsInstance singleton = new IOStatsInstance(); + RemoteIterator it + = mappingRemoteIterator(remoteIteratorFromSingleton(singleton), + Object::toString); + verifyInvoked(it, 1, this::log); + extractStatistics(it); + } + + /** + * Close() calls are passed through. + */ + @Test + public void testClosePassthrough() throws Throwable { + CountdownRemoteIterator countdown = new CountdownRemoteIterator(0); + RemoteIterator it = mappingRemoteIterator( + countdown, + i -> i); + verifyInvoked(it, 0, this::log); + // the foreach() operation called close() + countdown.assertCloseCount(1); + extractStatistics(countdown); + ((Closeable)it).close(); + countdown.assertCloseCount(1); + } + + @Test + public void testMapping() throws Throwable { + CountdownRemoteIterator countdown = new CountdownRemoteIterator(100); + RemoteIterator it = mappingRemoteIterator( + countdown, + i -> i); + verifyInvoked(it, 100, c -> counter++); + assertCounterValue(100); + extractStatistics(it); + assertStringValueContains(it, "CountdownRemoteIterator"); + close(it); + countdown.assertCloseCount(1); + } + + @Test + public void testFiltering() throws Throwable { + CountdownRemoteIterator countdown = new CountdownRemoteIterator(100); + // only even numbers are passed through + RemoteIterator it = filteringRemoteIterator( + countdown, + i -> (i % 2) == 0); + verifyInvoked(it, 50, c -> counter++); + assertCounterValue(50); + extractStatistics(it); + close(it); + countdown.assertCloseCount(1); + } + + /** + * A filter which accepts nothing results in + * an empty iteration. 
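The new TestRemoteIterators exercises the RemoteIterators helpers (remoteIteratorFromArray, mappingRemoteIterator, filteringRemoteIterator, foreach). A minimal sketch chaining the same calls outside the test harness, with sample data only:

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import static org.apache.hadoop.util.functional.RemoteIterators.*;

    public class RemoteIteratorsSketch {
      public static void main(String[] args) throws IOException {
        RemoteIterator<String> source =
            remoteIteratorFromArray(new String[] {"a", "bb", "ccc"});
        // Map each element to its length, then keep only the even lengths.
        RemoteIterator<Integer> evenLengths = filteringRemoteIterator(
            mappingRemoteIterator(source, String::length),
            len -> len % 2 == 0);
        long matched = foreach(evenLengths, len -> System.out.println("length = " + len));
        System.out.println("matched " + matched + " entries");
      }
    }
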
+ */ + @Test + public void testFilterNoneAccepted() throws Throwable { + // nothing gets through + RemoteIterator it = filteringRemoteIterator( + new CountdownRemoteIterator(100), + i -> false); + verifyInvoked(it, 0, c -> counter++); + assertCounterValue(0); + extractStatistics(it); + } + + @Test + public void testFilterAllAccepted() throws Throwable { + // nothing gets through + RemoteIterator it = filteringRemoteIterator( + new CountdownRemoteIterator(100), + i -> true); + verifyInvoked(it, 100, c -> counter++); + assertStringValueContains(it, "CountdownRemoteIterator"); + } + + @Test + public void testJavaIteratorSupport() throws Throwable { + CountdownIterator countdownIterator = new CountdownIterator(100); + RemoteIterator it = remoteIteratorFromIterator( + countdownIterator); + verifyInvoked(it, 100, c -> counter++); + assertStringValueContains(it, "CountdownIterator"); + extractStatistics(it); + close(it); + countdownIterator.assertCloseCount(1); + } + + @Test + public void testJavaIterableSupport() throws Throwable { + CountdownIterable countdown = new CountdownIterable(100); + RemoteIterator it = remoteIteratorFromIterable( + countdown); + verifyInvoked(it, 100, c -> counter++); + assertStringValueContains(it, "CountdownIterator"); + extractStatistics(it); + // close the iterator + close(it); + countdown.assertCloseCount(0); + // and a new iterator can be crated + verifyInvoked(remoteIteratorFromIterable(countdown), + 100, c -> counter++); + } + + /** + * If a RemoteIterator is constructed from an iterable + * and that is to be closed, we close it. + */ + @Test + public void testJavaIterableClose() throws Throwable { + CountdownIterable countdown = new CountdownIterable(100); + RemoteIterator it = closingRemoteIterator( + remoteIteratorFromIterable(countdown), + countdown); + verifyInvoked(it, 100, c -> counter++); + assertStringValueContains(it, "CountdownIterator"); + extractStatistics(it); + + // verify the iterator was self closed in hasNext() + countdown.assertCloseCount(1); + + // explicitly close the iterator + close(it); + countdown.assertCloseCount(1); + // and a new iterator cannot be created + intercept(IllegalStateException.class, () -> + remoteIteratorFromIterable(countdown)); + } + + /** + * If a RemoteIterator is constructed from an iterable + * and that is to be closed, we close it. + */ + @SuppressWarnings("InfiniteLoopStatement") + @Test + public void testJavaIterableCloseInNextLoop() throws Throwable { + CountdownIterable countdown = new CountdownIterable(100); + RemoteIterator it = closingRemoteIterator( + remoteIteratorFromIterable(countdown), + countdown); + try { + while(true) { + it.next(); + } + } catch (NoSuchElementException expected) { + + } + // verify the iterator was self closed in next() + countdown.assertCloseCount(1); + + } + + /** + * assert that the string value of an object contains the + * expected text. + * @param o object + * @param expected expected text + */ + protected void assertStringValueContains( + final Object o, + final String expected) { + assertThat(o.toString()) + .describedAs("Object string value") + .contains(expected); + } + + /** + * Assert that the counter field is at a specific value. + * @param expected counter + */ + protected void assertCounterValue(final int expected) { + assertThat(counter) + .describedAs("Counter value") + .isEqualTo(expected); + } + + /** + * Verify that the iteration completes with a given size. + * @param it iterator + * @param type. 
+ * @param length expected size + * @param consumer consumer + */ + protected void verifyInvoked(final RemoteIterator it, + int length, + ConsumerRaisingIOE consumer) + throws IOException { + assertThat(foreach(it, consumer)) + .describedAs("Scan through iterator %s", it) + .isEqualTo(length); + } + + /** + * Close an iterator if it is iterable. + * @param it iterator + * @param type. + */ + private void close(final RemoteIterator it) throws IOException { + if (it instanceof Closeable) { + ((Closeable) it).close(); + } + } + + /** + * Class whose close() call increments a counter. + */ + private static class CloseCounter extends + IOStatsInstance implements Closeable { + + private int closeCount; + + @Override + public void close() throws IOException { + closeCount++; + LOG.info("close ${}", closeCount); + } + + public int getCloseCount() { + return closeCount; + } + + public void reset() { + closeCount = 0; + } + + public void assertCloseCount(int expected) { + assertThat(closeCount) + .describedAs("Close count") + .isEqualTo(expected); + } + + } + + /** + * Simple class to implement IOStatistics. + */ + private static class IOStatsInstance implements IOStatisticsSource { + + private IOStatisticsSnapshot stats = new IOStatisticsSnapshot(); + + @Override + public IOStatistics getIOStatistics() { + return stats; + } + + } + + /** + * Iterator which counts down. + */ + private static final class CountdownRemoteIterator extends CloseCounter + implements RemoteIterator { + + private int limit; + + private CountdownRemoteIterator(final int limit) { + this.limit = limit; + } + + @Override + public boolean hasNext() throws IOException { + return limit > 0; + } + + @Override + public Integer next() throws IOException { + return limit--; + } + + @Override + public String toString() { + return "CountdownRemoteIterator{" + + "limit=" + limit + + '}'; + } + } + + /** + * Iterator which counts down. + */ + private static final class CountdownIterator extends CloseCounter + implements Iterator { + + private int limit; + + private CountdownIterator(final int limit) { + this.limit = limit; + } + + @Override + public boolean hasNext() { + return limit > 0; + } + + @Override + public Integer next() { + if (!hasNext()) { + throw new NoSuchElementException("limit reached"); + } + return limit--; + } + + @Override + public String toString() { + return "CountdownIterator{" + + "limit=" + limit + + '}'; + } + } + + /** + * Iterable for countdown iterators. + * Once closed, calls to iterator() raise an exception. 
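closingRemoteIterator(), also covered above, ties a Closeable's lifetime to the iteration: the wrapped resource is closed once hasNext() returns false, or when the iterator itself is closed. A minimal sketch in which a Closeable lambda stands in for a real resource:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.RemoteIterator;
    import static org.apache.hadoop.util.functional.RemoteIterators.*;

    public class ClosingIteratorSketch {
      public static void main(String[] args) throws IOException {
        List<String> lines = Arrays.asList("a", "b", "c");
        Closeable resource = () -> System.out.println("resource closed");
        RemoteIterator<String> it =
            closingRemoteIterator(remoteIteratorFromIterable(lines), resource);
        foreach(it, System.out::println); // prints a, b, c, then "resource closed"
      }
    }
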
+ */ + private static final class CountdownIterable extends CloseCounter + implements Iterable { + + private int limit; + + private CountdownIterable(final int limit) { + this.limit = limit; + } + + @Override + public Iterator iterator() { + Preconditions.checkState(getCloseCount() == 0); + + return new CountdownIterator(limit); + } + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml index b261a63be7df7..03bb3e800fba8 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml @@ -121,4 +121,14 @@ case sensitivity and permission options are determined at run time from OS type true + + fs.contract.supports-settimes + true + + + + fs.contract.supports-getfilestatus + true + + diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml index 8cbd4a0abcf38..198ca566e25a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/rawlocal.xml @@ -127,4 +127,19 @@ true + + fs.contract.supports-hflush + true + + + + fs.contract.supports-hsync + true + + + + fs.contract.metadata_updated_on_hsync + true + + diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml new file mode 100644 index 0000000000000..20a24b7e54061 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/sftp.xml @@ -0,0 +1,79 @@ + + + + + + + fs.contract.test.root-tests-enabled + false + + + + fs.contract.is-case-sensitive + true + + + + fs.contract.supports-append + false + + + + fs.contract.supports-atomic-directory-delete + true + + + + fs.contract.supports-atomic-rename + true + + + + fs.contract.supports-block-locality + false + + + + fs.contract.supports-concat + false + + + + fs.contract.supports-seek + true + + + + fs.contract.rejects-seek-past-eof + true + + + + fs.contract.supports-strict-exceptions + true + + + + fs.contract.supports-unix-permissions + false + + + diff --git a/hadoop-common-project/hadoop-common/src/test/resources/lz4/.sequencefile.crc b/hadoop-common-project/hadoop-common/src/test/resources/lz4/.sequencefile.crc new file mode 100644 index 0000000000000..b36bc54a7c599 Binary files /dev/null and b/hadoop-common-project/hadoop-common/src/test/resources/lz4/.sequencefile.crc differ diff --git a/hadoop-common-project/hadoop-common/src/test/resources/lz4/sequencefile b/hadoop-common-project/hadoop-common/src/test/resources/lz4/sequencefile new file mode 100644 index 0000000000000..eca7cdea3b323 Binary files /dev/null and b/hadoop-common-project/hadoop-common/src/test/resources/lz4/sequencefile differ diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml index 392d39170d5fe..6ebb9c461ba1c 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml @@ -496,7 +496,10 @@ RegexpComparator - ^-put \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :( )* + + RegexpComparator + ^-put \[-f\] \[-p\] \[-l\] \[-d\] \[-t <thread count>\] <localsrc> \.\.\. 
<dst> :\s* + RegexpComparator @@ -512,15 +515,19 @@ RegexpComparator - ^\s*-p Preserves access and modification times, ownership and the mode.( )* + ^\s*-p Preserves timestamps, ownership and the mode.( )* + + + RegexpComparator + ^\s*-f Overwrites the destination if it already exists.( )* RegexpComparator - ^\s*-f Overwrites the destination if it already exists.( )* + ^\s*-t <thread count> Number of threads to be used, default is 1.( )* RegexpComparator - ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )* + ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )* RegexpComparator @@ -532,7 +539,7 @@ RegexpComparator - ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )* + ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )* @@ -551,47 +558,7 @@ RegexpComparator - ^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )* - - - RegexpComparator - ^\s*exists, unless the -f flag is given.( )* - - - RegexpComparator - ^\s*Flags:( )* - - - RegexpComparator - ^\s*-p Preserves access and modification times, ownership and the( )* - - - RegexpComparator - ^\s*mode.( )* - - - RegexpComparator - ^\s*-f Overwrites the destination if it already exists.( )* - - - RegexpComparator - ^\s*-t <thread count> Number of threads to be used, default is 1.( )* - - - RegexpComparator - ^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )* - - - RegexpComparator - ^\s*replication factor of 1. This flag will result in reduced( )* - - - RegexpComparator - ^\s*durability. Use with care.( )* - - - RegexpComparator - ^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )* + ^\s*Identical to the -put command\.\s* @@ -606,11 +573,14 @@ RegexpComparator - ^-moveFromLocal <localsrc> \.\.\. <dst> :\s* + ^-moveFromLocal \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :\s* RegexpComparator - ^( |\t)*Same as -put, except that the source is deleted after it's copied. + ^( |\t)*Same as -put, except that the source is deleted after it's copied + + RegexpComparator + ^\s* and -t option has not yet implemented. @@ -849,7 +819,7 @@ RegexpComparator - ^-touch \[-a\] \[-m\] \[-t TIMESTAMP \] \[-c\] <path> \.\.\. :( )* + ^-touch \[-a\] \[-m\] \[-t TIMESTAMP \(yyyyMMdd\:HHmmss\) \] \[-c\] <path> \.\.\. 
:( )* RegexpComparator @@ -877,11 +847,11 @@ RegexpComparator - ^\s*-t\s+TIMESTAMP\s+Use specified timestamp \(in format yyyyMMddHHmmss\) instead of + ^\s*-t\s+TIMESTAMP\s+Use specified timestamp instead of current time( )* RegexpComparator - ^\s*current time( )* + ^\s*TIMESTAMP format yyyyMMdd\:HHmmss RegexpComparator diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index d1f2c226e509b..b5a2dcbd79aa1 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -22,11 +22,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-kms - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop KMS @@ -54,8 +54,8 @@ compile - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava compile @@ -237,8 +237,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java index b6b425443babc..be0f8d3fbc5d4 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.KMSUtil; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java index ba0fe825b4eb1..1b75f9fee0659 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java @@ -35,7 +35,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Provides access to the AccessControlLists used by KMS, diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java index 13a2d5c57a74e..4c64a37feabbd 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.crypto.key.kms.server.KMSAuditLogger.AuditEvent; import static org.apache.hadoop.crypto.key.kms.server.KMSAuditLogger.OpStatus; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.crypto.key.kms.server.KMSACLs.Type; import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType; @@ -31,13 +31,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Strings; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.HashSet; import java.util.concurrent.Callable; @@ -191,7 +191,7 @@ private void logEvent(final OpStatus status, AuditEvent event) { private void op(final OpStatus opStatus, final Object op, final UserGroupInformation ugi, final String key, final String remoteHost, final String extraMsg) { - final String user = ugi == null ? null: ugi.getShortUserName(); + final String user = ugi == null ? null: ugi.getUserName(); if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key) && (op != null) && AGGREGATE_OPS_WHITELIST.contains(op)) { diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuditLogger.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuditLogger.java index 2e2ba1d6a1b8f..2534a44912c99 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuditLogger.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuditLogger.java @@ -77,7 +77,7 @@ class AuditEvent { this.user = null; this.impersonator = null; } else { - this.user = ugi.getShortUserName(); + this.user = ugi.getUserName(); if (ugi.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.PROXY) { this.impersonator = ugi.getRealUser().getUserName(); diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java index da542ffb191e6..ead22e4686645 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.KMSDelegationToken; @@ -28,6 +28,7 @@ import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler; import 
org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler; import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler; +import org.eclipse.jetty.server.Response; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -113,6 +114,18 @@ public void setStatus(int sc) { public void sendError(int sc, String msg) throws IOException { statusCode = sc; this.msg = msg; + + ServletResponse response = getResponse(); + + // After Jetty 9.4.21, sendError() no longer allows a custom message. + // use setStatusWithReason() to set a custom message. + if (response instanceof Response) { + ((Response) response).setStatusWithReason(sc, msg); + } else { + KMS.LOG.warn("The wrapped response object is instance of {}" + + ", not org.eclipse.jetty.server.Response. Can't set custom error " + + "message", response.getClass()); + } super.sendError(sc, HtmlQuoting.quoteHtmlChars(msg)); } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java index f3c0bbdda6183..dc2ba3261cfd8 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java @@ -21,7 +21,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import javax.servlet.Filter; import javax.servlet.FilterChain; diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java index da597b4da5f81..a92dd1045c01a 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java @@ -26,7 +26,7 @@ import com.codahale.metrics.JmxReporter; import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.CachingKeyProvider; diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java index 7cfc010ac2c76..639d85521c3ce 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java @@ -22,12 +22,16 @@ import java.net.MalformedURLException; import java.net.URI; import java.net.URL; +import java.util.LinkedHashSet; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; import 
org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.JvmPauseMonitor; @@ -94,6 +98,22 @@ public class KMSWebServer { KMSConfiguration.HTTP_PORT_DEFAULT); URI endpoint = new URI(scheme, null, host, port, null, null, null); + String configuredInitializers = + conf.get(HttpServer2.FILTER_INITIALIZER_PROPERTY); + if (configuredInitializers != null) { + Set target = new LinkedHashSet(); + String[] initializers = configuredInitializers.split(","); + for (String init : initializers) { + if (!init.equals(AuthenticationFilterInitializer.class.getName()) && + !init.equals( + ProxyUserAuthenticationFilterInitializer.class.getName())) { + target.add(init); + } + } + String actualInitializers = StringUtils.join(",", target); + conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, actualInitializers); + } + httpServer = new HttpServer2.Builder() .setName(NAME) .setConf(conf) diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java index 101591b0310d2..fe3207b31c27a 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java @@ -32,9 +32,9 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; /** * A {@link KeyProvider} proxy that checks whether the current user derived via diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/SimpleKMSAuditLogger.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/SimpleKMSAuditLogger.java index 4dcbe2c54f2f7..74825097f3038 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/SimpleKMSAuditLogger.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/SimpleKMSAuditLogger.java @@ -23,8 +23,8 @@ import java.util.LinkedList; import java.util.List; -import com.google.common.base.Joiner; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml index 783f4e6c03b2a..134326f5312f3 100644 --- a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml +++ 
b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
@@ -103,7 +103,7 @@
 
   <property>
     <name>hadoop.http.idle_timeout.ms</name>
-    <value>1000</value>
+    <value>60000</value>
     <description>
      KMS Server connection timeout in milliseconds.
    </description>
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 5490219750020..95e926b3561a1 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -307,7 +307,7 @@ Configure `etc/hadoop/ssl-server.xml` with proper values, for example:
 ```
 
 The SSL passwords can be secured by a credential provider. See
-[Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
+[Credential Provider API](../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html).
 
 You need to create an SSL certificate for the KMS. As the `kms` Unix user,
 using the Java `keytool` command to create the SSL certificate:
@@ -716,7 +716,7 @@ $H4 HTTP Kerberos Principals Configuration
 
 When KMS instances are behind a load-balancer or VIP, clients will use the hostname of the VIP. For Kerberos SPNEGO authentication, the hostname of the URL is used to construct the Kerberos service name of the server, `HTTP/#HOSTNAME#`. This means that all KMS instances must have a Kerberos service name with the load-balancer or VIP hostname.
 
-In order to be able to access directly a specific KMS instance, the KMS instance must also have Keberos service name with its own hostname. This is required for monitoring and admin purposes.
+In order to be able to access directly a specific KMS instance, the KMS instance must also have Kerberos service name with its own hostname. This is required for monitoring and admin purposes.
 
 Both Kerberos service principal credentials (for the load-balancer/VIP hostname and for the actual KMS instance hostname) must be in the keytab file configured for authentication. And the principal name specified in the configuration must be '\*'.
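A minimal sketch of the wildcard-principal setup described above, assuming hypothetical hostnames `lb.example.com` and `kms01.example.com` and a hypothetical keytab path; the `hadoop.kms.authentication.*` property names are the same keys exercised by `TestKMS` later in this patch, not new configuration:

```java
// Illustrative sketch only (not part of this patch): the kms-site.xml settings
// for a KMS instance behind a load balancer, expressed via the Configuration API.
import org.apache.hadoop.conf.Configuration;

public class KmsLoadBalancerKerberosExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // The keytab must hold both HTTP/lb.example.com (the VIP hostname) and
    // HTTP/kms01.example.com (this instance's own hostname).
    conf.set("hadoop.kms.authentication.type", "kerberos");
    conf.set("hadoop.kms.authentication.kerberos.keytab",
        "/etc/security/keytabs/kms.keytab");
    // With several HTTP/ service principals in one keytab, the configured
    // principal name is the wildcard '*', as the paragraph above requires.
    conf.set("hadoop.kms.authentication.kerberos.principal", "*");
    System.out.println(
        conf.get("hadoop.kms.authentication.kerberos.principal"));
  }
}
```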
For example: diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java index faa8fa280a9cf..bc4bbc3df70bd 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java @@ -26,7 +26,7 @@ import java.io.Writer; import java.net.URL; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index 3b511a1c5c488..3d59e6f5be7b7 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import com.google.common.base.Supplier; -import com.google.common.cache.LoadingCache; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.curator.test.TestingServer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProviderFactory; @@ -38,6 +38,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -3079,4 +3080,45 @@ public Void call() throws Exception { } }); } + + @Test + public void testFilterInitializer() throws Exception { + Configuration conf = new Configuration(); + File testDir = getTestDir(); + conf = createBaseKMSConf(testDir, conf); + conf.set("hadoop.security.authentication", "kerberos"); + conf.set("hadoop.kms.authentication.token.validity", "1"); + conf.set("hadoop.kms.authentication.type", "kerberos"); + conf.set("hadoop.kms.authentication.kerberos.keytab", + keytab.getAbsolutePath()); + conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost"); + conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT"); + conf.set("hadoop.http.filter.initializers", + AuthenticationFilterInitializer.class.getName()); + conf.set("hadoop.http.authentication.type", "kerberos"); + conf.set("hadoop.http.authentication.kerberos.principal", "HTTP/localhost"); + conf.set("hadoop.http.authentication.kerberos.keytab", + keytab.getAbsolutePath()); + + writeConf(testDir, conf); + + runServer(null, null, testDir, new KMSCallable() { + @Override + public Void call() throws Exception { + final Configuration conf = new Configuration(); + URL url = getKMSUrl(); + final URI uri = createKMSUri(getKMSUrl()); + + doAs("client", new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + final KeyProvider kp = createProvider(uri, conf); + Assert.assertTrue(kp.getKeys().isEmpty()); + return null; + } + }); + return null; + } + }); + } } diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java index 09145be28a0df..2f47ed794ac84 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java @@ -40,7 +40,6 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; -import org.mockito.Mockito; public class TestKMSAudit { @@ -50,6 +49,8 @@ public class TestKMSAudit { private PrintStream capturedOut; private KMSAudit kmsAudit; + private UserGroupInformation luser = + UserGroupInformation.createUserForTesting("luser@REALM", new String[0]); private static class FilterOut extends FilterOutputStream { public FilterOut(OutputStream out) { @@ -95,10 +96,7 @@ private String getAndResetLogOutput() { } @Test - @SuppressWarnings("checkstyle:linelength") public void testAggregation() throws Exception { - UserGroupInformation luser = Mockito.mock(UserGroupInformation.class); - Mockito.when(luser.getShortUserName()).thenReturn("luser"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg"); @@ -120,27 +118,30 @@ public void testAggregation() throws Exception { kmsAudit.evictCacheForTesting(); String out = getAndResetLogOutput(); System.out.println(out); - Assert.assertTrue( - out.matches( - "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + boolean doesMatch = out.matches( + "OK\\[op=DECRYPT_EEK, key=k1, user=luser@REALM, accessCount=1, " + + "interval=[^m]{1,4}ms\\] testmsg" // Not aggregated !! 
- + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg" - + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg" - + "OK\\[op=INVALIDATE_CACHE, key=k1, user=luser\\] testmsg" + + "OK\\[op=DELETE_KEY, key=k1, user=luser@REALM\\] testmsg" + + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser@REALM\\] testmsg" + + "OK\\[op=INVALIDATE_CACHE, key=k1, user=luser@REALM\\] testmsg" // Aggregated - + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser, accessCount=3, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=REENCRYPT_EEK_BATCH, key=k1, user=luser\\] testmsg" - + "OK\\[op=REENCRYPT_EEK_BATCH, key=k1, user=luser\\] testmsg")); + + "OK\\[op=DECRYPT_EEK, key=k1, user=luser@REALM, accessCount=6, " + + "interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=DECRYPT_EEK, key=k1, user=luser@REALM, accessCount=1, " + + "interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser@REALM, " + + "accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser@REALM, " + + "accessCount=3, interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=REENCRYPT_EEK_BATCH, key=k1, user=luser@REALM\\] testmsg" + + "OK\\[op=REENCRYPT_EEK_BATCH, key=k1, user=luser@REALM\\] " + + "testmsg"); + Assert.assertTrue(doesMatch); } @Test - @SuppressWarnings("checkstyle:linelength") public void testAggregationUnauth() throws Exception { - UserGroupInformation luser = Mockito.mock(UserGroupInformation.class); - Mockito.when(luser.getShortUserName()).thenReturn("luser"); kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2"); kmsAudit.evictCacheForTesting(); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg"); @@ -159,25 +160,29 @@ public void testAggregationUnauth() throws Exception { // The UNAUTHORIZED will trigger cache invalidation, which then triggers // the aggregated OK (accessCount=5). But the order of the UNAUTHORIZED and // the aggregated OK is arbitrary - no correctness concerns, but flaky here. 
- Assert.assertTrue(out.matches( - "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] " - + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg" - + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] " - + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg") - || out.matches( - "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] " - + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" - + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] " - + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg")); + boolean doesMatch = out.matches( + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser@REALM\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser@REALM, accessCount=1," + + " interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=GENERATE_EEK, key=k3, user=luser@REALM, accessCount=5," + + " interval=[^m]{1,4}ms\\] testmsg" + + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser@REALM\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser@REALM, accessCount=1," + + " interval=[^m]{1,4}ms\\] testmsg"); + doesMatch = doesMatch || out.matches( + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser@REALM\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser@REALM, accessCount=1," + + " interval=[^m]{1,4}ms\\] testmsg" + + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser@REALM\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser@REALM, accessCount=5," + + " interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=GENERATE_EEK, key=k3, user=luser@REALM, accessCount=1," + + " interval=[^m]{1,4}ms\\] testmsg"); + Assert.assertTrue(doesMatch); } @Test - @SuppressWarnings("checkstyle:linelength") public void testAuditLogFormat() throws Exception { - UserGroupInformation luser = Mockito.mock(UserGroupInformation.class); - Mockito.when(luser.getShortUserName()).thenReturn("luser"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k4", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "testmsg"); kmsAudit.evictCacheForTesting(); @@ -187,12 +192,15 @@ public void testAuditLogFormat() throws Exception { String out = getAndResetLogOutput(); System.out.println(out); Assert.assertTrue(out.matches( - "OK\\[op=GENERATE_EEK, key=k4, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" - + "OK\\[op=GENERATE_EEK, user=luser\\] testmsg" - + "OK\\[op=GENERATE_EEK, key=k4, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" - + "UNAUTHORIZED\\[op=DECRYPT_EEK, key=k4, user=luser\\] " - + "ERROR\\[user=luser\\] Method:'method' Exception:'testmsg'" - + "UNAUTHENTICATED RemoteHost:remotehost Method:method URL:url ErrorMsg:'testmsg'")); + "OK\\[op=GENERATE_EEK, key=k4, user=luser@REALM, accessCount=1, " + + "interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=GENERATE_EEK, user=luser@REALM\\] testmsg" + + "OK\\[op=GENERATE_EEK, key=k4, user=luser@REALM, accessCount=1," + + " interval=[^m]{1,4}ms\\] testmsg" + + "UNAUTHORIZED\\[op=DECRYPT_EEK, key=k4, user=luser@REALM\\] " + + "ERROR\\[user=luser@REALM\\] Method:'method' Exception:'testmsg'" + + "UNAUTHENTICATED RemoteHost:remotehost Method:method URL:url " + + "ErrorMsg:'testmsg'")); } @SuppressWarnings("unchecked") diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index 
adbd6e32bee58..28a477dae54f1 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -18,12 +18,12 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project 4.0.0 hadoop-minikdc - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MiniKDC Apache Hadoop MiniKDC jar @@ -53,8 +53,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index e0fedaf1434e6..b9b10dcf4daaf 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-nfs - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop NFS @@ -94,8 +94,8 @@ compile - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava org.assertj @@ -107,8 +107,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java index 3d5088d7006c4..97b8a444ac28a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java @@ -32,7 +32,7 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java index 4d31a8203f24e..46e4f7259d808 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java @@ -23,7 +23,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * LOOKUP3 Request diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java index 5898ec588ff31..e2f3e2ff8e03a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java @@ -22,7 +22,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * READ3 Request diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java index 5bde2c0f69a8f..6fbfd5f0c6671 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java @@ -28,7 +28,7 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.Verifier; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * READDIR3 Response diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java index cf32bd1c87865..5e814c488e7cd 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java @@ -30,7 +30,7 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.Verifier; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * READDIRPLUS3 Response diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java index 2d6f30ecb56cb..8632a387c6032 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.Map.Entry; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is used for handling the duplicate non-idempotenty Rpc diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java index 5c059aa4550a4..d4b2261e05f22 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; import org.apache.hadoop.oncrpc.security.Verifier; import org.apache.hadoop.oncrpc.security.VerifierNone; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java index e866a5c419cd3..985629e0285cb 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java @@ -20,7 +20,7 @@ import org.apache.hadoop.oncrpc.security.RpcAuthInfo; import org.apache.hadoop.oncrpc.security.Verifier; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Represents an RPC message of type RPC reply as defined in RFC 1831 diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java index acdc51c954521..419eff831f0e7 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java @@ -23,8 +23,8 @@ import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Utility class for building XDR messages based on RFC 4506. diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java index 64edf485b29f7..fd832c4ab2409 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.oncrpc.security; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.oncrpc.XDR; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java index 753edba49fbda..f62dc6bd223b7 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java @@ -19,7 +19,7 @@ import org.apache.hadoop.oncrpc.XDR; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** Credential used by AUTH_NONE */ public class CredentialsNone extends Credentials { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java index 19ba32022eef7..8713d210f4678 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java @@ -21,7 +21,7 @@ import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.oncrpc.XDR; /** Credential used by AUTH_SYS */ diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java index 8bccd1b9be247..1f8ad7b37138b 100644 --- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java @@ -19,7 +19,7 @@ import org.apache.hadoop.oncrpc.XDR; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** Verifier used by AUTH_NONE. */ public class VerifierNone extends Verifier { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java index 123999d5e14c0..80f43828ea83b 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java @@ -38,7 +38,7 @@ import org.jboss.netty.handler.timeout.IdleStateHandler; import org.jboss.netty.util.HashedWheelTimer; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml index f3454e57b6d2f..dcf34f763658e 100644 --- a/hadoop-common-project/hadoop-registry/pom.xml +++ b/hadoop-common-project/hadoop-registry/pom.xml @@ -19,12 +19,12 @@ hadoop-project org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project 4.0.0 hadoop-registry - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Registry @@ -126,8 +126,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava @@ -163,10 +163,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${project.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java index 480ce0ed5fb20..a1349f3e26f00 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java index 1a8bb3ec0262a..8a26b4b450def 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.api; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.registry.server.dns.RegistryDNS; diff --git 
a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java index 5f9c5f37508c1..786bec040b22d 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.api; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceStateException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java index b8e9ba1bd7fbe..09df00d083c3e 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.binding; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathNotFoundException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java index 05df3255e3a76..9a4369cdda385 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.binding; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.exceptions.InvalidRecordException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java index 1b839c253b14d..d862fe649b5ac 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java @@ -18,8 +18,8 @@ package org.apache.hadoop.registry.client.binding; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import 
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java index 41884a984844f..6a08dcc074725 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java @@ -47,8 +47,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Filesystem-based implementation of RegistryOperations. This class relies diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java index 2eb7aa54f09bc..a01b7151b6989 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java @@ -18,8 +18,8 @@ package org.apache.hadoop.registry.client.impl.zk; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.curator.ensemble.EnsembleProvider; import org.apache.curator.ensemble.fixed.FixedEnsembleProvider; import org.apache.curator.framework.CuratorFramework; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java index 4c911da156b8e..e46a016baa07d 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.impl.zk; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.api.BindFlags; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java index c3cb021fb53f3..065cbe3296b09 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java +++ 
b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java @@ -18,9 +18,9 @@ package org.apache.hadoop.registry.client.impl.zk; -import com.google.common.base.Preconditions; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java index 3c4a730608f6e..e045c16e84b0a 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java @@ -18,8 +18,8 @@ package org.apache.hadoop.registry.client.impl.zk; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.api.GetChildrenBuilder; import org.apache.zookeeper.data.ACL; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java index 392884faf87f5..b92b93df7be55 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java @@ -20,7 +20,7 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.binding.JsonSerDeser; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java index 9bb02c3cc3816..1a85436ed17ef 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonInclude; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index be63d028f1fd5..eeee581540963 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.registry.server.dns; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.apache.commons.io.filefilter.IOFileFilter; import org.apache.commons.net.util.Base64; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java index 826b02c5e513c..1ff5f26b47207 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.registry.server.dns; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.registry.client.api.DNSOperationsFactory; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java index 796f46bae108e..bb375831d6eae 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.registry.server.dns; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.net.Inet6Address; import java.net.InetAddress; import java.net.UnknownHostException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java index 6a1993eafd9c1..8d395e4c5c763 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.server.integration; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.registry.client.types.RegistryPathStatus; import org.apache.hadoop.registry.client.types.ServiceRecord; diff --git 
a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java index a7e2611b3df9d..0ab4cd2f3bfac 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.server.services; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.curator.ensemble.fixed.FixedEnsembleProvider; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java index d60797e71ea02..3234088e01d84 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.server.services; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.api.BackgroundCallback; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index 8be2593c21ffd..bcd412a970d52 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-common-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Common Project Apache Hadoop Common Project pom diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 07aa7b10a8320..1e7150ff3b60f 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Distribution Apache Hadoop Distribution jar diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index 20523cfbfa0cf..821e0e66afe47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-hdfs-client - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop HDFS Client Apache Hadoop HDFS Client jar @@ -113,6 +113,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.fasterxml.jackson.core jackson-databind + + org.bouncycastle + bcprov-jdk15on + test + diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java index 290f2c0e6766f..b014222fea5fc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -135,6 +135,14 @@ public FileChecksum getFileChecksum(Path f) return dfs.getFileChecksumWithCombineMode(getUriPath(f), Long.MAX_VALUE); } + /** + * {@inheritDoc} + * + * If the given path is a symlink, the path will be resolved to a target path + * and it will get the resolved path's FileStatus object. It will not be + * represented as a symlink and isDirectory API returns true if the resolved + * path is a directory, false otherwise. + */ @Override public FileStatus getFileStatus(Path f) throws IOException, UnresolvedLinkException { @@ -145,7 +153,19 @@ public FileStatus getFileStatus(Path f) throw new FileNotFoundException("File does not exist: " + f.toString()); } } - + + /** + * Synchronize client metadata state with Active NameNode. + *
    + * In HA the client synchronizes its state with the Active NameNode + * in order to guarantee subsequent read consistency from Observer Nodes. + * @throws IOException + */ + @Override + public void msync() throws IOException { + dfs.msync(); + } + @Override public FileStatus getFileLinkStatus(Path f) throws IOException, UnresolvedLinkException { @@ -269,6 +289,20 @@ public HdfsFileStatus getNext() throws IOException { } } + /** + * {@inheritDoc} + * + * If any of the the immediate children of the given path f is a symlink, the + * returned FileStatus object of that children would be represented as a + * symlink. It will not be resolved to the target path and will not get the + * target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link Hdfs#getFileStatus(Path f)} + */ @Override public FileStatus[] listStatus(Path f) throws IOException, UnresolvedLinkException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java index cbd941b6b9d90..5bb7e03fc9d3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java @@ -40,10 +40,10 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.net.ScriptBasedMapping; -import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,7 +77,7 @@ public class ClientContext { /** * Caches short-circuit file descriptors, mmap regions. */ - private final ShortCircuitCache shortCircuitCache; + private final ShortCircuitCache[] shortCircuitCache; /** * Caches TCP and UNIX domain sockets for reuse. @@ -119,8 +119,6 @@ public class ClientContext { private NodeBase clientNode; private boolean topologyResolutionEnabled; - private Daemon deadNodeDetectorThr = null; - /** * The switch to DeadNodeDetector. */ @@ -130,7 +128,18 @@ public class ClientContext { * Detect the dead datanodes in advance, and share this information among all * the DFSInputStreams in the same client. */ - private DeadNodeDetector deadNodeDetector = null; + private volatile DeadNodeDetector deadNodeDetector = null; + + /** + * Count the reference of ClientContext. + */ + private int counter = 0; + + /** + * ShortCircuitCache array size. 
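Editor's note on the ClientContext hunks here: the single ShortCircuitCache becomes an array sized by clientShortCircuitNum, and the selector added a little further down, getShortCircuitCache(long idx), picks one cache by id modulo that count, so readers of different blocks no longer serialize on one cache-wide lock, while the no-argument getShortCircuitCache() keeps returning stripe 0 for existing callers. A self-contained sketch of that dispatch, assuming nothing beyond what the hunks show; the map type, class name and sample values are illustrative stand-ins, not from the patch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;

    /**
     * Sketch of the stripe-by-id dispatch behind the ShortCircuitCache array.
     * ConcurrentHashMap stands in for the real cache type.
     */
    public class StripedCacheSketch {
      private final List<ConcurrentHashMap<Long, String>> stripes;
      private final int stripeCount;

      public StripedCacheSketch(int stripeCount) {
        this.stripeCount = stripeCount;
        this.stripes = new ArrayList<>(stripeCount);
        for (int i = 0; i < stripeCount; i++) {
          stripes.add(new ConcurrentHashMap<>()); // one independent cache per stripe
        }
      }

      /** Same selection rule as getShortCircuitCache(long idx): id modulo count. */
      public ConcurrentHashMap<Long, String> cacheFor(long blockId) {
        // floorMod keeps the index non-negative even for a negative id.
        return stripes.get((int) Math.floorMod(blockId, (long) stripeCount));
      }

      public static void main(String[] args) {
        StripedCacheSketch caches = new StripedCacheSketch(4);
        caches.cacheFor(1073741825L).put(1073741825L, "replica-on-local-disk");
        System.out.println(caches.cacheFor(1073741825L).get(1073741825L));
      }
    }

Because every stripe has its own lock domain, concurrent short-circuit readers that map to different stripes proceed independently, which appears to be the point of making the stripe count configurable.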
+ */ + private final int clientShortCircuitNum; + private Configuration configuration; private ClientContext(String name, DfsClientConf conf, Configuration config) { @@ -138,7 +147,13 @@ private ClientContext(String name, DfsClientConf conf, this.name = name; this.confString = scConf.confAsString(); - this.shortCircuitCache = ShortCircuitCache.fromConf(scConf); + this.clientShortCircuitNum = conf.getClientShortCircuitNum(); + this.shortCircuitCache = new ShortCircuitCache[this.clientShortCircuitNum]; + for (int i = 0; i < this.clientShortCircuitNum; i++) { + this.shortCircuitCache[i] = ShortCircuitCache.fromConf(scConf); + } + + this.configuration = config; this.peerCache = new PeerCache(scConf.getSocketCacheCapacity(), scConf.getSocketCacheExpiry()); this.keyProviderCache = new KeyProviderCache( @@ -149,11 +164,6 @@ private ClientContext(String name, DfsClientConf conf, this.byteArrayManager = ByteArrayManager.newInstance( conf.getWriteByteArrayManagerConf()); this.deadNodeDetectionEnabled = conf.isDeadNodeDetectionEnabled(); - if (deadNodeDetectionEnabled && deadNodeDetector == null) { - deadNodeDetector = new DeadNodeDetector(name, config); - deadNodeDetectorThr = new Daemon(deadNodeDetector); - deadNodeDetectorThr.start(); - } initTopologyResolution(config); } @@ -191,6 +201,7 @@ public static ClientContext get(String name, DfsClientConf conf, context.printConfWarningIfNeeded(conf); } } + context.reference(); return context; } @@ -228,7 +239,11 @@ public String getConfString() { } public ShortCircuitCache getShortCircuitCache() { - return shortCircuitCache; + return shortCircuitCache[0]; + } + + public ShortCircuitCache getShortCircuitCache(long idx) { + return shortCircuitCache[(int) (idx % clientShortCircuitNum)]; } public PeerCache getPeerCache() { @@ -287,17 +302,27 @@ public DeadNodeDetector getDeadNodeDetector() { } /** - * Close dead node detector thread. + * Increment the counter. Start the dead node detector thread if there is no + * reference. */ - public void stopDeadNodeDetectorThread() { - if (deadNodeDetectorThr != null) { - deadNodeDetectorThr.interrupt(); - try { - deadNodeDetectorThr.join(); - } catch (InterruptedException e) { - LOG.warn("Encountered exception while waiting to join on dead " + - "node detector thread.", e); - } + synchronized void reference() { + counter++; + if (deadNodeDetectionEnabled && deadNodeDetector == null) { + deadNodeDetector = new DeadNodeDetector(name, configuration); + deadNodeDetector.start(); + } + } + + /** + * Decrement the counter. Close the dead node detector thread if there is no + * reference. 
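Editor's note: the new counter together with reference() above and unreference() just below ties the shared DeadNodeDetector's lifetime to the number of clients holding the ClientContext; the detector starts with the first reference and is shut down and nulled out with the last one, so a later client can start a fresh detector. A minimal standalone sketch of that reference-counting pattern, with a plain thread standing in for the detector and all names chosen for illustration:

    /**
     * Minimal sketch of the reference-counting pattern: the shared background
     * worker starts on the first reference and stops when the last reference
     * is released.
     */
    public class RefCountedWorkerSketch {
      private int counter = 0;
      private Thread worker = null;

      synchronized void reference() {
        counter++;
        if (worker == null) {
          worker = new Thread(() -> {
            try {
              while (!Thread.currentThread().isInterrupted()) {
                Thread.sleep(1000);          // stand-in for periodic probe work
              }
            } catch (InterruptedException ignored) {
              // interrupted during shutdown: fall through and exit
            }
          }, "shared-worker");
          worker.setDaemon(true);
          worker.start();
        }
      }

      synchronized void unreference() throws InterruptedException {
        if (counter <= 0) {
          throw new IllegalStateException("unreference() without a reference()");
        }
        counter--;
        if (counter == 0 && worker != null) {
          worker.interrupt();                // ask the worker to stop
          worker.join();                     // wait for its loop to exit
          worker = null;                     // the next reference() starts a new one
        }
      }
    }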
+ */ + synchronized void unreference() { + Preconditions.checkState(counter > 0); + counter--; + if (counter == 0 && deadNodeDetectionEnabled && deadNodeDetector != null) { + deadNodeDetector.shutdown(); + deadNodeDetector = null; } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 72b2113943756..b210f910416fb 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -192,11 +192,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; /******************************************************** * DFSClient can connect to a Hadoop Filesystem and @@ -652,7 +652,7 @@ public synchronized void close() throws IOException { clientRunning = false; // close dead node detector thread if (!disabledStopDeadNodeDetectorThreadForTest) { - clientContext.stopDeadNodeDetectorThread(); + clientContext.unreference(); } // close connections to the namenode @@ -2004,8 +2004,17 @@ private long getStateByIndex(int stateIndex) throws IOException { * @see ClientProtocol#getStats() */ public FsStatus getDiskStatus() throws IOException { - return new FsStatus(getStateByIndex(0), - getStateByIndex(1), getStateByIndex(2)); + try (TraceScope ignored = tracer.newScope("getStats")) { + long[] states = namenode.getStats(); + return new FsStatus(getStateAtIndex(states, 0), + getStateAtIndex(states, 1), getStateAtIndex(states, 2)); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + private long getStateAtIndex(long[] states, int index) { + return states.length > index ? states[index] : -1; } /** @@ -3378,4 +3387,11 @@ public void removeNodeFromDeadNodeDetector(DFSInputStream dfsInputStream, private boolean isDeadNodeDetectionEnabled() { return clientContext.isDeadNodeDetectionEnabled(); } + + /** + * Obtain DeadNodeDetector of the current client. 
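Editor's note: the rewritten getDiskStatus() above fetches all NameNode statistics with a single getStats() RPC inside a trace scope and reads the three FsStatus fields out of fixed slots, with getStateAtIndex() tolerating a server that returns a shorter array by reporting -1 instead of throwing. The defensive-indexing part in isolation, with everything other than the helper logic a simplified stand-in:

    /** Sketch: read fixed slots out of a stats array, tolerating short arrays. */
    public final class StatsSketch {
      private StatsSketch() {
      }

      // Mirrors getStateAtIndex(): a missing slot is reported as -1 rather than
      // letting an ArrayIndexOutOfBoundsException escape to the caller.
      static long stateAtIndex(long[] states, int index) {
        return states.length > index ? states[index] : -1;
      }

      public static void main(String[] args) {
        long[] fromOlderServer = {1000L, 250L};            // third slot missing
        System.out.println(stateAtIndex(fromOlderServer, 0)); // 1000
        System.out.println(stateAtIndex(fromOlderServer, 2)); // -1
      }
    }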
+ */ + public DeadNodeDetector getDeadNodeDetector() { + return clientContext.getDeadNodeDetector(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java index d36c0581c2153..3e4d1df8a5ca5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java @@ -19,7 +19,7 @@ import java.util.concurrent.atomic.AtomicLong; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index af9891a52fd80..a918101f0fa6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -88,7 +88,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import javax.annotation.Nonnull; @@ -181,10 +181,13 @@ private boolean isPeriodicRefreshEnabled() { private byte[] oneByteBuf; // used for 'int read()' protected void addToLocalDeadNodes(DatanodeInfo dnInfo) { + DFSClient.LOG.debug("Add {} to local dead nodes, previously was {}.", + dnInfo, deadNodes); deadNodes.put(dnInfo, dnInfo); } protected void removeFromLocalDeadNodes(DatanodeInfo dnInfo) { + DFSClient.LOG.debug("Remove {} from local dead nodes.", dnInfo); deadNodes.remove(dnInfo); } @@ -1054,10 +1057,21 @@ protected DNAddrPair getBestNodeDNAddrPair(LocatedBlock block, StorageType[] storageTypes = block.getStorageTypes(); DatanodeInfo chosenNode = null; StorageType storageType = null; - if (nodes != null) { + if (dfsClient.getConf().isReadUseCachePriority()) { + DatanodeInfo[] cachedLocs = block.getCachedLocations(); + if (cachedLocs != null) { + for (int i = 0; i < cachedLocs.length; i++) { + if (isValidNode(cachedLocs[i], ignoredNodes)) { + chosenNode = cachedLocs[i]; + break; + } + } + } + } + + if (chosenNode == null && nodes != null) { for (int i = 0; i < nodes.length; i++) { - if (!dfsClient.getDeadNodes(this).containsKey(nodes[i]) - && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) { + if (isValidNode(nodes[i], ignoredNodes)) { chosenNode = nodes[i]; // Storage types are ordered to correspond with nodes, so use the same // index to get storage type. 
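Editor's note: the getBestNodeDNAddrPair() change above consults the block's cached locations first when the client is configured to prefer them (isReadUseCachePriority()), and only then falls back to the regular replica list, with the new isValidNode() helper (defined in the following hunk) filtering out locally dead and explicitly ignored nodes in both passes. A stripped-down version of that selection order, using strings in place of DatanodeInfo and a plain boolean in place of the configuration switch, both of which are illustrative stand-ins:

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    /** Sketch of "cached locations first, then regular replicas" selection. */
    public final class ReplicaChoiceSketch {
      private ReplicaChoiceSketch() {
      }

      static String chooseNode(boolean preferCached, List<String> cachedLocs,
          List<String> nodes, Set<String> localDeadNodes,
          Collection<String> ignoredNodes) {
        if (preferCached) {
          for (String dn : cachedLocs) {
            if (isValidNode(dn, localDeadNodes, ignoredNodes)) {
              return dn;             // a replica cached in datanode memory wins
            }
          }
        }
        for (String dn : nodes) {
          if (isValidNode(dn, localDeadNodes, ignoredNodes)) {
            return dn;               // otherwise the first healthy regular replica
          }
        }
        return null;                 // sketch shorthand for "nothing usable found"
      }

      private static boolean isValidNode(String dn, Set<String> localDeadNodes,
          Collection<String> ignoredNodes) {
        return !localDeadNodes.contains(dn)
            && (ignoredNodes == null || !ignoredNodes.contains(dn));
      }

      public static void main(String[] args) {
        Set<String> dead = new HashSet<>(Arrays.asList("dn1"));
        String chosen = chooseNode(true,
            Arrays.asList("dn1", "dn3"),            // cached copies on dn1 and dn3
            Arrays.asList("dn1", "dn2", "dn3"),     // regular replica list
            dead, null);
        System.out.println(chosen);                 // dn3: dn1 is locally dead
      }
    }

When neither pass finds a node, the real method falls through to its existing refetch and report-lost handling, which this sketch abbreviates to returning null.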
@@ -1090,6 +1104,15 @@ protected void reportLostBlock(LocatedBlock lostBlock, ", ignoredNodes = " + ignoredNodes); } + private boolean isValidNode(DatanodeInfo node, + Collection ignoredNodes) { + if (!dfsClient.getDeadNodes(this).containsKey(node) + && (ignoredNodes == null || !ignoredNodes.contains(node))) { + return true; + } + return false; + } + private static String getBestNodeDNAddrPairErrorString( DatanodeInfo nodes[], AbstractMap deadNodes, Collection ignoredNodes) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index a9e44cd5c0def..9003e51139346 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -66,14 +67,13 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum.Type; import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.htrace.core.TraceScope; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /**************************************************************** @@ -560,13 +560,7 @@ void endBlock() throws IOException { @Override public boolean hasCapability(String capability) { - switch (StringUtils.toLowerCase(capability)) { - case StreamCapabilities.HSYNC: - case StreamCapabilities.HFLUSH: - return true; - default: - return false; - } + return StoreImplementationUtils.isProbeForSyncable(capability); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java index ba35d51561162..61585210ea355 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.hdfs.protocol.BlockType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java index 8d651d855c0c2..7c3965647c54b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CreateFlag; @@ -283,6 +283,7 @@ private void flipDataBuffers() { private ExecutorService flushAllExecutor; private CompletionService flushAllExecutorCompletionService; private int blockGroupIndex; + private long datanodeRestartTimeout; /** Construct a new output stream for creating a file. */ DFSStripedOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat, @@ -322,6 +323,7 @@ private void flipDataBuffers() { streamers.add(streamer); } currentPackets = new DFSPacket[streamers.size()]; + datanodeRestartTimeout = dfsClient.getConf().getDatanodeRestartTimeout(); setCurrentStreamer(0); } @@ -501,8 +503,14 @@ private void allocateNewBlock() throws IOException { LOG.debug("Allocating new block group. The previous block group: " + prevBlockGroup); - final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src, - prevBlockGroup, fileId, favoredNodes, getAddBlockFlags()); + final LocatedBlock lb; + try { + lb = addBlock(excludedNodes, dfsClient, src, + prevBlockGroup, fileId, favoredNodes, getAddBlockFlags()); + } catch (IOException ioe) { + closeAllStreamers(); + throw ioe; + } assert lb.isStriped(); // assign the new block to the current block group currentBlockGroup = lb.getBlock(); @@ -637,6 +645,11 @@ private Set markExternalErrorOnStreamers() { "streamer: " + streamer); streamer.setExternalError(); healthySet.add(streamer); + } else if (!streamer.streamerClosed() + && streamer.getErrorState().hasDatanodeError() + && streamer.getErrorState().doWaitForRestart()) { + healthySet.add(streamer); + failedStreamers.remove(streamer); } } return healthySet; @@ -701,6 +714,14 @@ private void checkStreamerFailures(boolean isNeedFlushAllPackets) for (int i = 0; i < numAllBlocks; i++) { coordinator.offerStreamerUpdateResult(i, newFailed.size() == 0); } + //wait for get notify to failed stream + if (newFailed.size() != 0) { + try { + Thread.sleep(datanodeRestartTimeout); + } catch (InterruptedException e) { + // Do nothing + } + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java index 95aad12d928b2..2b3c67683c730 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; -import com.google.common.primitives.SignedBytes; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.primitives.SignedBytes; import java.net.URISyntaxException; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index 1344c3b3a6ef6..3d6d3c53f9e10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -42,7 +42,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite; @@ -80,11 +80,11 @@ import org.apache.htrace.core.TraceScope; import org.apache.htrace.core.Tracer; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -483,6 +483,7 @@ boolean doWaitForRestart() { private volatile BlockConstructionStage stage; // block construction stage protected long bytesSent = 0; // number of bytes that've been sent private final boolean isLazyPersistFile; + private long lastPacket; /** Nodes have been used in the pipeline before and have failed. */ private final List failed = new ArrayList<>(); @@ -632,6 +633,7 @@ private void initDataStreaming() { response = new ResponseProcessor(nodes); response.start(); stage = BlockConstructionStage.DATA_STREAMING; + lastPacket = Time.monotonicNow(); } protected void endBlock() { @@ -653,7 +655,6 @@ private boolean shouldStop() { */ @Override public void run() { - long lastPacket = Time.monotonicNow(); TraceScope scope = null; while (!streamerClosed && dfsClient.clientRunning) { // if the Responder encountered an error, shutdown Responder @@ -666,44 +667,35 @@ public void run() { // process datanode IO errors if any boolean doSleep = processDatanodeOrExternalError(); - final int halfSocketTimeout = dfsClient.getConf().getSocketTimeout()/2; synchronized (dataQueue) { // wait for a packet to be sent. - long now = Time.monotonicNow(); - while ((!shouldStop() && dataQueue.size() == 0 && - (stage != BlockConstructionStage.DATA_STREAMING || - now - lastPacket < halfSocketTimeout)) || doSleep) { - long timeout = halfSocketTimeout - (now-lastPacket); - timeout = timeout <= 0 ? 1000 : timeout; - timeout = (stage == BlockConstructionStage.DATA_STREAMING)? 
- timeout : 1000; + while ((!shouldStop() && dataQueue.isEmpty()) || doSleep) { + long timeout = 1000; + if (stage == BlockConstructionStage.DATA_STREAMING) { + timeout = sendHeartbeat(); + } try { dataQueue.wait(timeout); } catch (InterruptedException e) { LOG.debug("Thread interrupted", e); } doSleep = false; - now = Time.monotonicNow(); } if (shouldStop()) { continue; } // get packet to be sent. - if (dataQueue.isEmpty()) { - one = createHeartbeatPacket(); - } else { - try { - backOffIfNecessary(); - } catch (InterruptedException e) { - LOG.debug("Thread interrupted", e); - } - one = dataQueue.getFirst(); // regular data packet - SpanId[] parents = one.getTraceParents(); - if (parents.length > 0) { - scope = dfsClient.getTracer(). - newScope("dataStreamer", parents[0]); - scope.getSpan().setParents(parents); - } + try { + backOffIfNecessary(); + } catch (InterruptedException e) { + LOG.debug("Thread interrupted", e); + } + one = dataQueue.getFirst(); // regular data packet + SpanId[] parents = one.getTraceParents(); + if (parents.length > 0) { + scope = dfsClient.getTracer(). + newScope("dataStreamer", parents[0]); + scope.getSpan().setParents(parents); } } @@ -731,17 +723,8 @@ public void run() { if (one.isLastPacketInBlock()) { // wait for all data packets have been successfully acked - synchronized (dataQueue) { - while (!shouldStop() && ackQueue.size() != 0) { - try { - // wait for acks to arrive from datanodes - dataQueue.wait(1000); - } catch (InterruptedException e) { - LOG.debug("Thread interrupted", e); - } - } - } - if (shouldStop()) { + waitForAllAcks(); + if(shouldStop()) { continue; } stage = BlockConstructionStage.PIPELINE_CLOSE; @@ -770,8 +753,7 @@ public void run() { // write out data to remote datanode try (TraceScope ignored = dfsClient.getTracer(). newScope("DataStreamer#writeTo", spanId)) { - one.writeTo(blockStream); - blockStream.flush(); + sendPacket(one); } catch (IOException e) { // HDFS-3398 treat primary DN is down since client is unable to // write to primary DN. If a failed or restarting node has already @@ -782,7 +764,6 @@ public void run() { errorState.markFirstNodeIfNotMarked(); throw e; } - lastPacket = Time.monotonicNow(); // update bytesSent long tmpBytesSent = one.getLastByteOffsetBlock(); @@ -797,11 +778,7 @@ public void run() { // Is this block full? if (one.isLastPacketInBlock()) { // wait for the close packet has been acked - synchronized (dataQueue) { - while (!shouldStop() && ackQueue.size() != 0) { - dataQueue.wait(1000);// wait for acks to arrive from datanodes - } - } + waitForAllAcks(); if (shouldStop()) { continue; } @@ -842,6 +819,48 @@ public void run() { closeInternal(); } + private void waitForAllAcks() throws IOException { + // wait until all data packets have been successfully acked + synchronized (dataQueue) { + while (!shouldStop() && !ackQueue.isEmpty()) { + try { + // wait for acks to arrive from datanodes + dataQueue.wait(sendHeartbeat()); + } catch (InterruptedException e) { + LOG.debug("Thread interrupted ", e); + } + } + } + } + + private void sendPacket(DFSPacket packet) throws IOException { + // write out data to remote datanode + try { + packet.writeTo(blockStream); + blockStream.flush(); + } catch (IOException e) { + // HDFS-3398 treat primary DN is down since client is unable to + // write to primary DN. If a failed or restarting node has already + // been recorded by the responder, the following call will have no + // effect. Pipeline recovery can handle only one node error at a + // time. 
If the primary node fails again during the recovery, it + // will be taken out then. + errorState.markFirstNodeIfNotMarked(); + throw e; + } + lastPacket = Time.monotonicNow(); + } + + private long sendHeartbeat() throws IOException { + final long heartbeatInterval = dfsClient.getConf().getSocketTimeout()/2; + long timeout = heartbeatInterval - (Time.monotonicNow() - lastPacket); + if (timeout <= 0) { + sendPacket(createHeartbeatPacket()); + timeout = heartbeatInterval; + } + return timeout; + } + private void closeInternal() { closeResponder(); // close and join closeStream(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java index a573e8a22aad3..cd46551f0225b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -29,9 +29,9 @@ import java.util.HashSet; import java.util.Map; -import java.util.Queue; import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; +import java.util.Deque; +import java.util.LinkedList; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -40,8 +40,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_DEFAULT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_DEAD_NODE_INTERVAL_MS_DEFAULT; @@ -54,15 +52,15 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_THREADS_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_RPC_THREADS_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_RPC_THREADS_KEY; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_DEFAULT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_DEFAULT; /** * Detect the dead nodes in advance, and share this information among all the * 
DFSInputStreams in the same client. */ -public class DeadNodeDetector implements Runnable { +public class DeadNodeDetector extends Daemon { public static final Logger LOG = LoggerFactory.getLogger(DeadNodeDetector.class); @@ -74,7 +72,7 @@ public class DeadNodeDetector implements Runnable { /** * Waiting time when DeadNodeDetector's state is idle. */ - private static final long IDLE_SLEEP_MS = 10000; + private final long idleSleepMs; /** * Client context name. @@ -113,16 +111,6 @@ public class DeadNodeDetector implements Runnable { */ private long suspectNodeDetectInterval = 0; - /** - * The max queue size of probing dead node. - */ - private int maxDeadNodesProbeQueueLen = 0; - - /** - * The max queue size of probing suspect node. - */ - private int maxSuspectNodesProbeQueueLen; - /** * Connection timeout for probing dead node in milliseconds. */ @@ -131,12 +119,12 @@ public class DeadNodeDetector implements Runnable { /** * The dead node probe queue. */ - private Queue deadNodesProbeQueue; + private UniqueQueue deadNodesProbeQueue; /** * The suspect node probe queue. */ - private Queue suspectNodesProbeQueue; + private UniqueQueue suspectNodesProbeQueue; /** * The thread pool of probing dead node. @@ -181,6 +169,32 @@ private enum State { INIT, CHECK_DEAD, IDLE, ERROR } + /** + * The thread safe unique queue. + */ + static class UniqueQueue { + private Deque queue = new LinkedList<>(); + private Set set = new HashSet<>(); + + synchronized boolean offer(T dn) { + if (set.add(dn)) { + queue.addLast(dn); + return true; + } + return false; + } + + synchronized T poll() { + T dn = queue.pollFirst(); + set.remove(dn); + return dn; + } + + synchronized int size() { + return set.size(); + } + } + /** * Disabled start probe suspect/dead thread for the testing. */ @@ -203,20 +217,14 @@ public DeadNodeDetector(String name, Configuration conf) { DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_INTERVAL_MS_DEFAULT); socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT); - maxDeadNodesProbeQueueLen = - conf.getInt(DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY, - DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_DEFAULT); - maxSuspectNodesProbeQueueLen = - conf.getInt(DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY, - DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_DEFAULT); probeConnectionTimeoutMs = conf.getLong( DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY, DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_DEFAULT); + this.deadNodesProbeQueue = new UniqueQueue<>(); + this.suspectNodesProbeQueue = new UniqueQueue<>(); - this.deadNodesProbeQueue = - new ArrayBlockingQueue(maxDeadNodesProbeQueueLen); - this.suspectNodesProbeQueue = - new ArrayBlockingQueue(maxSuspectNodesProbeQueueLen); + idleSleepMs = conf.getLong(DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY, + DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_DEFAULT); int deadNodeDetectDeadThreads = conf.getInt(DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_DEAD_NODE_THREADS_KEY, @@ -271,6 +279,37 @@ public void run() { } } + /** + * Shutdown all the threads. 
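Editor's note: the UniqueQueue introduced above replaces the two bounded ArrayBlockingQueues; rather than capping queue length (the *_QUEUE_MAX settings are removed), it refuses duplicate offers, so repeated reports of the same datanode cannot grow the queue. A standalone copy of the idea with a short usage example, re-typed here with ArrayDeque instead of LinkedList, which changes nothing observable:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashSet;
    import java.util.Set;

    /** A thread-safe FIFO queue that silently drops duplicate offers. */
    class UniqueQueueSketch<T> {
      private final Deque<T> queue = new ArrayDeque<>();
      private final Set<T> set = new HashSet<>();

      synchronized boolean offer(T element) {
        if (set.add(element)) {        // enqueue only elements not already present
          queue.addLast(element);
          return true;
        }
        return false;
      }

      synchronized T poll() {
        T element = queue.pollFirst();
        set.remove(element);           // removing null is a harmless no-op
        return element;
      }

      synchronized int size() {
        return set.size();
      }

      public static void main(String[] args) {
        UniqueQueueSketch<String> q = new UniqueQueueSketch<>();
        System.out.println(q.offer("dn1"));   // true  - first report is queued
        System.out.println(q.offer("dn1"));   // false - duplicate is dropped
        System.out.println(q.size());         // 1
        System.out.println(q.poll());         // dn1
      }
    }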
+ */ + public void shutdown() { + threadShutDown(this); + threadShutDown(probeDeadNodesSchedulerThr); + threadShutDown(probeSuspectNodesSchedulerThr); + probeDeadNodesThreadPool.shutdown(); + probeSuspectNodesThreadPool.shutdown(); + rpcThreadPool.shutdown(); + } + + private static void threadShutDown(Thread thread) { + if (thread != null && thread.isAlive()) { + thread.interrupt(); + try { + thread.join(); + } catch (InterruptedException e) { + } + } + } + + @VisibleForTesting + boolean isThreadsShutdown() { + return !this.isAlive() && !probeDeadNodesSchedulerThr.isAlive() + && !probeSuspectNodesSchedulerThr.isAlive() + && probeDeadNodesThreadPool.isShutdown() + && probeSuspectNodesThreadPool.isShutdown() + && rpcThreadPool.isShutdown(); + } + @VisibleForTesting static void setDisabledProbeThreadForTest( boolean disabledProbeThreadForTest) { @@ -294,7 +333,7 @@ void startProbeScheduler() { } /** - * Prode datanode by probe byte. + * Prode datanode by probe type. */ private void scheduleProbe(ProbeType type) { LOG.debug("Schedule probe datanode for probe type: {}.", type); @@ -376,9 +415,8 @@ public DatanodeLocalInfo call() throws Exception { } catch (Exception e) { LOG.error("Probe failed, datanode: {}, type: {}.", datanodeInfo, type, e); + deadNodeDetector.probeCallBack(this, false); } - - deadNodeDetector.probeCallBack(this, false); } } @@ -402,7 +440,7 @@ private void probeCallBack(Probe probe, boolean success) { } } else { if (probe.getType() == ProbeType.CHECK_SUSPECT) { - LOG.info("Add the node to dead node list: {}.", + LOG.warn("Probe failed, add suspect node to dead node list: {}.", probe.getDatanodeInfo()); addToDead(probe.getDatanodeInfo()); } @@ -415,11 +453,11 @@ private void probeCallBack(Probe probe, boolean success) { private void checkDeadNodes() { Set datanodeInfos = clearAndGetDetectedDeadNodes(); for (DatanodeInfo datanodeInfo : datanodeInfos) { - LOG.debug("Add dead node to check: {}.", datanodeInfo); if (!deadNodesProbeQueue.offer(datanodeInfo)) { LOG.debug("Skip to add dead node {} to check " + - "since the probe queue is full.", datanodeInfo); - break; + "since the node is already in the probe queue.", datanodeInfo); + } else { + LOG.debug("Add dead node to check: {}.", datanodeInfo); } } state = State.IDLE; @@ -427,7 +465,7 @@ private void checkDeadNodes() { private void idle() { try { - Thread.sleep(IDLE_SLEEP_MS); + Thread.sleep(idleSleepMs); } catch (InterruptedException e) { LOG.debug("Got interrupted while DeadNodeDetector is idle.", e); Thread.currentThread().interrupt(); @@ -452,14 +490,24 @@ private void removeFromDead(DatanodeInfo datanodeInfo) { deadNodes.remove(datanodeInfo.getDatanodeUuid()); } - public Queue getDeadNodesProbeQueue() { + public UniqueQueue getDeadNodesProbeQueue() { return deadNodesProbeQueue; } - public Queue getSuspectNodesProbeQueue() { + public UniqueQueue getSuspectNodesProbeQueue() { return suspectNodesProbeQueue; } + @VisibleForTesting + void setSuspectQueue(UniqueQueue queue) { + this.suspectNodesProbeQueue = queue; + } + + @VisibleForTesting + void setDeadQueue(UniqueQueue queue) { + this.deadNodesProbeQueue = queue; + } + /** * Add datanode to suspectNodes and suspectAndDeadNodes. 
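Editor's note: the shutdown() added above follows the usual stop sequence, interrupt and join each looping thread, then shut down the executors that served it, with isThreadsShutdown() letting tests confirm the result. A compact generic version of the two helpers; the awaitTermination/shutdownNow escalation and the re-assertion of the interrupt flag are refinements this sketch adds, while the patch itself stops at shutdown() and a bare join():

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    /** Sketch of the interrupt-join-then-shutdown sequence. */
    final class ShutdownSketch {
      private ShutdownSketch() {
      }

      static void stopThread(Thread t) {
        if (t != null && t.isAlive()) {
          t.interrupt();                    // wake it from sleep()/wait()
          try {
            t.join();                       // wait for the run loop to exit
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();  // preserve the interrupt status
          }
        }
      }

      static void stopExecutor(ExecutorService pool, long waitSeconds) {
        pool.shutdown();                    // stop accepting new tasks
        try {
          if (!pool.awaitTermination(waitSeconds, TimeUnit.SECONDS)) {
            pool.shutdownNow();             // running tasks get cancelled
          }
        } catch (InterruptedException e) {
          pool.shutdownNow();
          Thread.currentThread().interrupt();
        }
      }

      public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("probe task"));
        stopExecutor(pool, 5);
      }
    }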
*/ @@ -475,6 +523,7 @@ public synchronized void addNodeToDetect(DFSInputStream dfsInputStream, datanodeInfos.add(datanodeInfo); } + LOG.debug("Add datanode {} to suspectAndDeadNodes.", datanodeInfo); addSuspectNodeToDetect(datanodeInfo); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index d1babe3280c8d..53655d16874d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -19,9 +19,9 @@ package org.apache.hadoop.hdfs; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.collections.list.TreeList; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -52,6 +52,7 @@ import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider; import org.apache.hadoop.fs.InvalidPathHandleException; import org.apache.hadoop.fs.PartialListing; +import org.apache.hadoop.fs.MultipartUploaderBuilder; import org.apache.hadoop.fs.PathHandle; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Options; @@ -66,6 +67,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -181,7 +183,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { throw new IOException("Incomplete HDFS URI, no host: "+ uri); } - this.dfs = new DFSClient(uri, conf, statistics); + initDFSClient(uri, conf); this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority()); this.workingDir = getHomeDirectory(); @@ -195,6 +197,10 @@ public StorageStatistics provide() { }); } + void initDFSClient(URI theUri, Configuration conf) throws IOException { + this.dfs = new DFSClient(theUri, conf, statistics); + } + @Override public Path getWorkingDirectory() { return workingDir; @@ -1143,10 +1149,21 @@ private FileStatus[] listStatusInternal(Path p) throws IOException { /** * List all the entries of a directory * - * Note that this operation is not atomic for a large directory. - * The entries of a directory may be fetched from NameNode multiple times. - * It only guarantees that each name occurs once if a directory - * undergoes changes between the calls. + * Note that this operation is not atomic for a large directory. The entries + * of a directory may be fetched from NameNode multiple times. It only + * guarantees that each name occurs once if a directory undergoes changes + * between the calls. + * + * If any of the the immediate children of the given path f is a symlink, the + * returned FileStatus object of that children would be represented as a + * symlink. 
It will not be resolved to the target path and will not get the + * target path FileStatus object. The target path will be available via + * getSymlink on that children's FileStatus object. Since it represents as + * symlink, isDirectory on that children's FileStatus will return false. + * + * If you want to get the FileStatus of target path for that children, you may + * want to use GetFileStatus API with that children's symlink path. Please see + * {@link DistributedFileSystem#getFileStatus(Path f)} */ @Override public FileStatus[] listStatus(Path p) throws IOException { @@ -1494,10 +1511,14 @@ protected boolean primitiveMkdir(Path f, FsPermission absolutePermission) @Override public void close() throws IOException { try { - dfs.closeOutputStreams(false); + if (dfs != null) { + dfs.closeOutputStreams(false); + } super.close(); } finally { - dfs.close(); + if (dfs != null) { + dfs.close(); + } } } @@ -1712,6 +1733,12 @@ public FsServerDefaults getServerDefaults() throws IOException { /** * Returns the stat information about the file. + * + * If the given path is a symlink, the path will be resolved to a target path + * and it will get the resolved path's FileStatus object. It will not be + * represented as a symlink and isDirectory API returns true if the resolved + * path is a directory, false otherwise. + * * @throws FileNotFoundException if the file does not exist. */ @Override @@ -1737,6 +1764,18 @@ public FileStatus next(final FileSystem fs, final Path p) }.resolve(this, absF); } + /** + * Synchronize client metadata state with Active NameNode. + *
    + * In HA the client synchronizes its state with the Active NameNode + * in order to guarantee subsequent read consistency from Observer Nodes. + * @throws IOException + */ + @Override + public void msync() throws IOException { + dfs.msync(); + } + @SuppressWarnings("deprecation") @Override public void createSymlink(final Path target, final Path link, @@ -3549,7 +3588,8 @@ public RemoteIterator listOpenFiles( public RemoteIterator listOpenFiles( EnumSet openFilesTypes, String path) throws IOException { - return dfs.listOpenFiles(openFilesTypes, path); + Path absF = fixRelativePart(new Path(path)); + return dfs.listOpenFiles(openFilesTypes, getPathName(absF)); } @@ -3590,4 +3630,10 @@ public boolean hasPathCapability(final Path path, final String capability) return super.hasPathCapability(p, capability); } + + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return new FileSystemMultipartUploaderBuilder(this, basePath); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java index 17d20fe014d1b..29073a1b38059 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java @@ -28,11 +28,11 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.util.KMSUtil; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java index c640b39b6f488..31bc2d97a8662 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java @@ -35,8 +35,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; @@ -56,7 +56,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.io.retry.RetryUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import 
org.apache.hadoop.security.SecurityUtil; @@ -355,7 +355,7 @@ public static ClientProtocol createProxyWithAlignmentContext( AlignmentContext alignmentContext) throws IOException { RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy( diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java index 0580ed536da15..79f313398be0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java @@ -23,9 +23,9 @@ import java.util.List; import java.util.Map.Entry; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.LinkedListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java index 65d2c906a952c..efadedb8f082f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.util.StripedBlockUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java index b37501d2e2be7..8bc6951f593ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.util.StripedBlockUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java index 8fd38bdb3b795..e66d391af6943 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import 
com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java index d920f18e24748..e90e66ace4988 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java @@ -33,7 +33,7 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Progressable; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class extends {@link DataStreamer} to support writing striped blocks @@ -143,7 +143,8 @@ protected void setupPipelineInternal(DatanodeInfo[] nodes, // set up the pipeline again with the remaining nodes. when a striped // data streamer comes here, it must be in external error state. - assert getErrorState().hasExternalError(); + assert getErrorState().hasExternalError() + || getErrorState().doWaitForRestart(); success = createBlockOutputStream(nodes, nodeStorageTypes, nodeStorageIDs, newGS, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ViewDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ViewDistributedFileSystem.java new file mode 100644 index 0000000000000..4fee963290618 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ViewDistributedFileSystem.java @@ -0,0 +1,2323 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.BlockStoragePolicySpi; +import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.FsStatus; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.PartialListing; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.QuotaUsage; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.viewfs.ViewFileSystem; +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsPathHandle; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.OpenFilesIterator; +import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.io.MultipleIOException; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.token.DelegationTokenIssuer; +import org.apache.hadoop.security.token.Token; +import 
org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+/**
+ * ViewDistributedFileSystem is an extension of DistributedFileSystem that adds
+ * mounting functionality. The goal is better API compatibility for HDFS users
+ * who need a mounting filesystem ({@link ViewFileSystemOverloadScheme}).
+ * {@link ViewFileSystemOverloadScheme} is a filesystem that inherits its
+ * mounting functionality from ViewFileSystem.
+ * Users who currently set
+ * fs.hdfs.impl=org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme can
+ * instead set fs.hdfs.impl=org.apache.hadoop.hdfs.ViewDistributedFileSystem,
+ * so that HDFS users get a closely compatible API together with mount
+ * functionality. All other schemes can continue to use
+ * ViewFileSystemOverloadScheme directly for mount functionality; note that
+ * ViewFileSystemOverloadScheme provides only the {@link ViewFileSystem} APIs.
+ * If this class is configured but no mount points are configured, it simply
+ * behaves like the existing DistributedFileSystem class. If both fs.hdfs.impl
+ * and mount configurations are set, users can call the APIs available in this
+ * class (which are the DFS APIs), and the calls are delegated to the viewfs
+ * functionality. Note that APIs without a path argument (for example
+ * isInSafeMode) are delegated to the default filesystem only, that is, the
+ * configured fallback link. If you want to make such calls on a specific child
+ * filesystem, initialize that filesystem separately and call it directly. When
+ * using ViewDistributedFileSystem, we strongly recommend configuring
+ * linkFallback when you add mount links, and pointing it to your base cluster,
+ * usually your current fs.defaultFS if that points to hdfs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class ViewDistributedFileSystem extends DistributedFileSystem {
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(ViewDistributedFileSystem.class);
+
+  // A mounting file system.
+  private ViewFileSystemOverloadScheme vfs;
+  // A default DFS, which should have been set via linkFallback.
+  private DistributedFileSystem defaultDFS;
+
+  @Override
+  public void initialize(URI uri, Configuration conf) throws IOException {
+    super.initialize(uri, conf);
+    try {
+      this.vfs = tryInitializeMountingViewFs(uri, conf);
+    } catch (IOException ioe) {
+      LOGGER.debug("Mount tree initialization failed with the reason => {}."
+          + " Falling back to regular DFS initialization. Please re-initialize"
+          + " the fs after updating mount point.", ioe.getMessage());
+      // Previous super.initialize would have skipped the dfsclient init and
+      // setWorkingDirectory as we planned to initialize vfs. Since vfs init
+      // failed, let's init dfsClient now.
+ super.initDFSClient(uri, conf); + super.setWorkingDirectory(super.getHomeDirectory()); + return; + } + + setConf(conf); + // A child DFS with the current initialized URI. This must be same as + // fallback fs. The fallback must point to root of your filesystems. + // Some APIs(without path in argument, for example isInSafeMode) will + // support only for base cluster filesystem. Only that APIs will use this + // fs. + defaultDFS = (DistributedFileSystem) this.vfs.getFallbackFileSystem(); + // Please don't access internal dfs client directly except in tests. + dfs = (defaultDFS != null) ? defaultDFS.dfs : null; + super.setWorkingDirectory(this.vfs.getHomeDirectory()); + } + + @Override + void initDFSClient(URI uri, Configuration conf) throws IOException { + // Since we plan to initialize vfs in this class, we will not need to + // initialize DFS client. + } + + public ViewDistributedFileSystem() { + } + + private ViewFileSystemOverloadScheme tryInitializeMountingViewFs(URI theUri, + Configuration conf) throws IOException { + ViewFileSystemOverloadScheme viewFs = new ViewFileSystemOverloadScheme(); + viewFs.setSupportAutoAddingFallbackOnNoMounts(false); + viewFs.initialize(theUri, conf); + return viewFs; + } + + @Override + public URI getUri() { + if (this.vfs == null) { + return super.getUri(); + } + return this.vfs.getUri(); + } + + @Override + public String getScheme() { + if (this.vfs == null) { + return super.getScheme(); + } + return this.vfs.getScheme(); + } + + @Override + public Path getWorkingDirectory() { + if (this.vfs == null) { + return super.getWorkingDirectory(); + } + return this.vfs.getWorkingDirectory(); + } + + @Override + public void setWorkingDirectory(Path dir) { + if (this.vfs == null) { + super.setWorkingDirectory(dir); + return; + } + this.vfs.setWorkingDirectory(dir); + } + + @Override + public Path getHomeDirectory() { + if (super.dfs == null) { + return null; + } + if (this.vfs == null) { + return super.getHomeDirectory(); + } + return this.vfs.getHomeDirectory(); + } + + /** + * Returns only default cluster getHedgedReadMetrics. 
+ */ + @Override + public DFSHedgedReadMetrics getHedgedReadMetrics() { + if (this.vfs == null) { + return super.getHedgedReadMetrics(); + } + checkDefaultDFS(defaultDFS, "getHedgedReadMetrics"); + return defaultDFS.getHedgedReadMetrics(); + } + + @Override + public BlockLocation[] getFileBlockLocations(FileStatus fs, long start, + long len) throws IOException { + if (this.vfs == null) { + return super.getFileBlockLocations(fs, start, len); + } + return this.vfs.getFileBlockLocations(fs, start, len); + } + + @Override + public BlockLocation[] getFileBlockLocations(Path p, final long start, + final long len) throws IOException { + if (this.vfs == null) { + return super.getFileBlockLocations(p, start, len); + } + return this.vfs.getFileBlockLocations(p, start, len); + } + + @Override + public void setVerifyChecksum(final boolean verifyChecksum) { + if (this.vfs == null) { + super.setVerifyChecksum(verifyChecksum); + return; + } + this.vfs.setVerifyChecksum(verifyChecksum); + } + + @Override + public boolean recoverLease(final Path f) throws IOException { + if (this.vfs == null) { + return super.recoverLease(f); + } + + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "recoverLease"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .recoverLease(mountPathInfo.getPathOnTarget()); + } + + @Override + public FSDataInputStream open(final Path f, final int bufferSize) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + return super.open(f, bufferSize); + } + + return this.vfs.open(f, bufferSize); + } + + @Override + public FSDataInputStream open(PathHandle fd, int bufferSize) + throws IOException { + if (this.vfs == null) { + return super.open(fd, bufferSize); + } + return this.vfs.open(fd, bufferSize); + } + + @Override + protected HdfsPathHandle createPathHandle(FileStatus st, + Options.HandleOpt... 
opts) { + if (this.vfs == null) { + return super.createPathHandle(st, opts); + } + throw new UnsupportedOperationException(); + } + + @Override + public FSDataOutputStream append(final Path f, final int bufferSize, + final Progressable progress) throws IOException { + if (this.vfs == null) { + return super.append(f, bufferSize, progress); + } + return this.vfs.append(f, bufferSize, progress); + } + + @Override + public FSDataOutputStream append(Path f, final EnumSet flag, + final int bufferSize, final Progressable progress) throws IOException { + if (this.vfs == null) { + return super.append(f, flag, bufferSize, progress); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "append"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .append(mountPathInfo.getPathOnTarget(), flag, bufferSize, progress); + } + + @Override + public FSDataOutputStream append(Path f, final EnumSet flag, + final int bufferSize, final Progressable progress, + final InetSocketAddress[] favoredNodes) throws IOException { + if (this.vfs == null) { + return super.append(f, flag, bufferSize, progress, favoredNodes); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "append"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .append(mountPathInfo.getPathOnTarget(), flag, bufferSize, progress, + favoredNodes); + } + + @Override + public FSDataOutputStream create(Path f, FsPermission permission, + boolean overwrite, int bufferSize, short replication, long blockSize, + Progressable progress) throws IOException { + if (this.vfs == null) { + return super + .create(f, permission, overwrite, bufferSize, replication, blockSize, + progress); + } + return this.vfs + .create(f, permission, overwrite, bufferSize, replication, blockSize, + progress); + } + + @Override + public HdfsDataOutputStream create(final Path f, + final FsPermission permission, final boolean overwrite, + final int bufferSize, final short replication, final long blockSize, + final Progressable progress, final InetSocketAddress[] favoredNodes) + throws IOException { + if (this.vfs == null) { + return super + .create(f, permission, overwrite, bufferSize, replication, blockSize, + progress, favoredNodes); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "create"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .create(mountPathInfo.getPathOnTarget(), permission, overwrite, + bufferSize, replication, blockSize, progress, favoredNodes); + } + + @Override + //DFS specific API + public FSDataOutputStream create(final Path f, final FsPermission permission, + final EnumSet cflags, final int bufferSize, + final short replication, final long blockSize, + final Progressable progress, final Options.ChecksumOpt checksumOpt) + throws IOException { + if (this.vfs == null) { + return super + .create(f, permission, cflags, bufferSize, replication, blockSize, + progress, checksumOpt); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "create"); + return mountPathInfo.getTargetFs() + .create(mountPathInfo.getPathOnTarget(), permission, cflags, bufferSize, + replication, blockSize, progress, checksumOpt); + } + + void checkDFS(FileSystem fs, String 
methodName) { + if (!(fs instanceof DistributedFileSystem)) { + String msg = new StringBuilder("This API:").append(methodName) + .append(" is specific to DFS. Can't run on other fs:") + .append(fs.getUri()).toString(); + throw new UnsupportedOperationException(msg); + } + } + + void checkDefaultDFS(FileSystem fs, String methodName) { + if (fs == null) { + String msg = new StringBuilder("This API:").append(methodName).append( + " cannot be supported without default cluster(that is linkFallBack).") + .toString(); + throw new UnsupportedOperationException(msg); + } + } + + @Override + // DFS specific API + protected HdfsDataOutputStream primitiveCreate(Path f, + FsPermission absolutePermission, EnumSet flag, int bufferSize, + short replication, long blockSize, Progressable progress, + Options.ChecksumOpt checksumOpt) throws IOException { + if (this.vfs == null) { + return super + .primitiveCreate(f, absolutePermission, flag, bufferSize, replication, + blockSize, progress, checksumOpt); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "primitiveCreate"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .primitiveCreate(f, absolutePermission, flag, bufferSize, replication, + blockSize, progress, checksumOpt); + } + + @Override + public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, + EnumSet flags, int bufferSize, short replication, + long blockSize, Progressable progress) throws IOException { + if (this.vfs == null) { + return super + .createNonRecursive(f, permission, flags, bufferSize, replication, + bufferSize, progress); + } + return this.vfs + .createNonRecursive(f, permission, flags, bufferSize, replication, + bufferSize, progress); + } + + @Override + public boolean setReplication(final Path f, final short replication) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + return super.setReplication(f, replication); + } + return this.vfs.setReplication(f, replication); + } + + @Override + public void setStoragePolicy(Path src, String policyName) throws IOException { + if (this.vfs == null) { + super.setStoragePolicy(src, policyName); + return; + } + this.vfs.setStoragePolicy(src, policyName); + } + + @Override + public void unsetStoragePolicy(Path src) throws IOException { + if (this.vfs == null) { + super.unsetStoragePolicy(src); + return; + } + this.vfs.unsetStoragePolicy(src); + } + + @Override + public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException { + if (this.vfs == null) { + return super.getStoragePolicy(src); + } + return this.vfs.getStoragePolicy(src); + } + + @Override + public Collection getAllStoragePolicies() + throws IOException { + if (this.vfs == null) { + return super.getAllStoragePolicies(); + } + Collection allStoragePolicies = + this.vfs.getAllStoragePolicies(); + return (Collection) allStoragePolicies; + } + + @Override + public long getBytesWithFutureGenerationStamps() throws IOException { + if (this.vfs == null) { + return super.getBytesWithFutureGenerationStamps(); + } + checkDefaultDFS(defaultDFS, "getBytesWithFutureGenerationStamps"); + return defaultDFS.getBytesWithFutureGenerationStamps(); + } + + @Deprecated + @Override + public BlockStoragePolicy[] getStoragePolicies() throws IOException { + if (this.vfs == null) { + return super.getStoragePolicies(); + } + checkDefaultDFS(defaultDFS, "getStoragePolicies"); + return defaultDFS.getStoragePolicies(); + } + + 
+  @Override
+  // Make sure your target fs supports this API; otherwise you will get an
+  // UnsupportedOperationException.
+  public void concat(Path trg, Path[] psrcs) throws IOException {
+    if (this.vfs == null) {
+      super.concat(trg, psrcs);
+      return;
+    }
+    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountPathInfo =
+        this.vfs.getMountPathInfo(trg, getConf());
+    mountPathInfo.getTargetFs().concat(mountPathInfo.getPathOnTarget(), psrcs);
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public boolean rename(final Path src, final Path dst) throws IOException {
+    if (this.vfs == null) {
+      return super.rename(src, dst);
+    }
+    return this.vfs.rename(src, dst);
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public void rename(Path src, Path dst, final Options.Rename... options)
+      throws IOException {
+    if (this.vfs == null) {
+      super.rename(src, dst, options);
+      return;
+    }
+
+    // TODO: revisit
+    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountSrcPathInfo =
+        this.vfs.getMountPathInfo(src, getConf());
+    checkDFS(mountSrcPathInfo.getTargetFs(), "rename");
+
+    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountDstPathInfo =
+        this.vfs.getMountPathInfo(dst, getConf());
+    checkDFS(mountDstPathInfo.getTargetFs(), "rename");
+
+    // Check that both paths resolve to the same cluster.
+    if (!mountSrcPathInfo.getTargetFs().getUri()
+        .equals(mountDstPathInfo.getTargetFs().getUri())) {
+      throw new HadoopIllegalArgumentException(
+          "Can't rename across file systems.");
+    }
+
+    ((DistributedFileSystem) mountSrcPathInfo.getTargetFs())
+        .rename(mountSrcPathInfo.getPathOnTarget(),
+            mountDstPathInfo.getPathOnTarget(), options);
+  }
+
+  @Override
+  public boolean truncate(final Path f, final long newLength)
+      throws IOException {
+    if (this.vfs == null) {
+      return super.truncate(f, newLength);
+    }
+    return this.vfs.truncate(f, newLength);
+  }
+
+  public boolean delete(final Path f, final boolean recursive)
+      throws AccessControlException, FileNotFoundException, IOException {
+    if (this.vfs == null) {
+      return super.delete(f, recursive);
+    }
+    return this.vfs.delete(f, recursive);
+  }
+
+  @Override
+  public ContentSummary getContentSummary(Path f) throws IOException {
+    if (this.vfs == null) {
+      return super.getContentSummary(f);
+    }
+    return this.vfs.getContentSummary(f);
+  }
+
+  @Override
+  public QuotaUsage getQuotaUsage(Path f) throws IOException {
+    if (this.vfs == null) {
+      return super.getQuotaUsage(f);
+    }
+    return this.vfs.getQuotaUsage(f);
+  }
+
+  @Override
+  public void setQuota(Path src, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    if (this.vfs == null) {
+      super.setQuota(src, namespaceQuota, storagespaceQuota);
+      return;
+    }
+    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountPathInfo =
+        this.vfs.getMountPathInfo(src, getConf());
+    mountPathInfo.getTargetFs()
+        .setQuota(mountPathInfo.getPathOnTarget(), namespaceQuota,
+            storagespaceQuota);
+  }
+
+  @Override
+  public void setQuotaByStorageType(Path src, final StorageType type,
+      final long quota) throws IOException {
+    if (this.vfs == null) {
+      super.setQuotaByStorageType(src, type, quota);
+      return;
+    }
+    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountPathInfo =
+        this.vfs.getMountPathInfo(src, getConf());
+    mountPathInfo.getTargetFs()
+        .setQuotaByStorageType(mountPathInfo.getPathOnTarget(), type, quota);
+  }
+
+  @Override
+  public FileStatus[] listStatus(Path p) throws IOException {
+    if (this.vfs == null) {
+      return super.listStatus(p);
+    }
+    return this.vfs.listStatus(p);
+  }
+
+  @Override
+  public RemoteIterator<LocatedFileStatus>
listLocatedStatus(final Path f, + final PathFilter filter) throws FileNotFoundException, IOException { + if (this.vfs == null) { + return super.listLocatedStatus(f, filter); + } + return this.vfs.listLocatedStatus(f, filter); + } + + @Override + public RemoteIterator listStatusIterator(final Path p) + throws IOException { + if (this.vfs == null) { + return super.listStatusIterator(p); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(p, getConf()); + return mountPathInfo.getTargetFs() + .listStatusIterator(mountPathInfo.getPathOnTarget()); + } + + @Override + public RemoteIterator> batchedListStatusIterator( + final List paths) throws IOException { + if (this.vfs == null) { + return super.batchedListStatusIterator(paths); + } + // TODO: revisit for correct implementation. + return this.defaultDFS.batchedListStatusIterator(paths); + } + + @Override + public RemoteIterator> batchedListLocatedStatusIterator( + final List paths) throws IOException { + if (this.vfs == null) { + return super.batchedListLocatedStatusIterator(paths); + } + // TODO: revisit for correct implementation. + return this.defaultDFS.batchedListLocatedStatusIterator(paths); + } + + public boolean mkdir(Path f, FsPermission permission) throws IOException { + if (this.vfs == null) { + return super.mkdir(f, permission); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "mkdir"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .mkdir(mountPathInfo.getPathOnTarget(), permission); + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + if (this.vfs == null) { + return super.mkdirs(f, permission); + } + return this.vfs.mkdirs(f, permission); + } + + @SuppressWarnings("deprecation") + @Override + protected boolean primitiveMkdir(Path f, FsPermission absolutePermission) + throws IOException { + if (this.vfs == null) { + return super.primitiveMkdir(f, absolutePermission); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "primitiveMkdir"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .primitiveMkdir(mountPathInfo.getPathOnTarget(), absolutePermission); + } + + @Override + public void close() throws IOException { + if (this.vfs != null) { + this.vfs.close(); + } + super.close(); + } + + @InterfaceAudience.Private + @Override + public DFSClient getClient() { + if (this.vfs == null) { + return super.getClient(); + } + checkDefaultDFS(defaultDFS, "getClient"); + return defaultDFS.getClient(); + } + + @Override + public FsStatus getStatus(Path p) throws IOException { + if (this.vfs == null) { + return super.getStatus(p); + } + return this.vfs.getStatus(p); + } + + @Override + public long getMissingBlocksCount() throws IOException { + if (this.vfs == null) { + return super.getMissingBlocksCount(); + } + checkDefaultDFS(defaultDFS, "getMissingBlocksCount"); + return defaultDFS.getMissingBlocksCount(); + } + + @Override + public long getPendingDeletionBlocksCount() throws IOException { + if (this.vfs == null) { + return super.getPendingDeletionBlocksCount(); + } + checkDefaultDFS(defaultDFS, "getPendingDeletionBlocksCount"); + return defaultDFS.getPendingDeletionBlocksCount(); + } + + @Override + public long getMissingReplOneBlocksCount() throws IOException { + if (this.vfs == null) { + return 
super.getMissingReplOneBlocksCount();
+    }
+    checkDefaultDFS(defaultDFS, "getMissingReplOneBlocksCount");
+    return defaultDFS.getMissingReplOneBlocksCount();
+  }
+
+  @Override
+  public long getLowRedundancyBlocksCount() throws IOException {
+    if (this.vfs == null) {
+      return super.getLowRedundancyBlocksCount();
+    }
+    checkDefaultDFS(defaultDFS, "getLowRedundancyBlocksCount");
+    return defaultDFS.getLowRedundancyBlocksCount();
+  }
+
+  @Override
+  public long getCorruptBlocksCount() throws IOException {
+    if (this.vfs == null) {
+      return super.getCorruptBlocksCount();
+    }
+    checkDefaultDFS(defaultDFS, "getCorruptBlocksCount");
+    return defaultDFS.getCorruptBlocksCount();
+  }
+
+  @Override
+  public RemoteIterator<Path> listCorruptFileBlocks(final Path path)
+      throws IOException {
+    if (this.vfs == null) {
+      return super.listCorruptFileBlocks(path);
+    }
+    ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountPathInfo =
+        this.vfs.getMountPathInfo(path, getConf());
+    return mountPathInfo.getTargetFs()
+        .listCorruptFileBlocks(mountPathInfo.getPathOnTarget());
+  }
+
+  @Override
+  public DatanodeInfo[] getDataNodeStats() throws IOException {
+    if (this.vfs == null) {
+      return super.getDataNodeStats();
+    }
+    checkDefaultDFS(defaultDFS, "getDataNodeStats");
+    return defaultDFS.getDataNodeStats();
+  }
+
+  @Override
+  public DatanodeInfo[] getDataNodeStats(
+      final HdfsConstants.DatanodeReportType type) throws IOException {
+    if (this.vfs == null) {
+      return super.getDataNodeStats(type);
+    }
+    checkDefaultDFS(defaultDFS, "getDataNodeStats");
+    return defaultDFS.getDataNodeStats(type);
+  }
+
+  @Override
+  public boolean setSafeMode(HdfsConstants.SafeModeAction action)
+      throws IOException {
+    if (this.vfs == null) {
+      return super.setSafeMode(action);
+    }
+    checkDefaultDFS(defaultDFS, "setSafeMode");
+    return defaultDFS.setSafeMode(action);
+  }
+
+  @Override
+  public boolean setSafeMode(HdfsConstants.SafeModeAction action,
+      boolean isChecked) throws IOException {
+    if (this.vfs == null) {
+      return super.setSafeMode(action, isChecked);
+    }
+    checkDefaultDFS(defaultDFS, "setSafeMode");
+    return defaultDFS.setSafeMode(action, isChecked);
+  }
+
+  @Override
+  public boolean saveNamespace(long timeWindow, long txGap) throws IOException {
+    if (this.vfs == null) {
+      return super.saveNamespace(timeWindow, txGap);
+    }
+    checkDefaultDFS(defaultDFS, "saveNamespace");
+    return defaultDFS.saveNamespace(timeWindow, txGap);
+  }
+
+  @Override
+  public void saveNamespace() throws IOException {
+    if (this.vfs == null) {
+      super.saveNamespace();
+      return;
+    }
+    checkDefaultDFS(defaultDFS, "saveNamespace");
+    defaultDFS.saveNamespace();
+  }
+
+  @Override
+  public long rollEdits() throws IOException {
+    if (this.vfs == null) {
+      return super.rollEdits();
+    }
+    checkDefaultDFS(defaultDFS, "rollEdits");
+    return defaultDFS.rollEdits();
+  }
+
+  @Override
+  public boolean restoreFailedStorage(String arg) throws IOException {
+    if (this.vfs == null) {
+      return super.restoreFailedStorage(arg);
+    }
+    checkDefaultDFS(defaultDFS, "restoreFailedStorage");
+    return defaultDFS.restoreFailedStorage(arg);
+  }
+
+  @Override
+  public void refreshNodes() throws IOException {
+    if (this.vfs == null) {
+      super.refreshNodes();
+      return;
+    }
+    checkDefaultDFS(defaultDFS, "refreshNodes");
+    defaultDFS.refreshNodes();
+  }
+
+  @Override
+  public void finalizeUpgrade() throws IOException {
+    if (this.vfs == null) {
+      super.finalizeUpgrade();
+      return;
+    }
+    checkDefaultDFS(defaultDFS, "finalizeUpgrade");
+    defaultDFS.finalizeUpgrade();
+  }
+
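The class Javadoc earlier in this file describes the intended configuration: fs.hdfs.impl pointing at this class, viewfs mount links, and a linkFallback pointing at the base cluster. A minimal usage sketch follows; the nameservices (ns1, ns2) and the /data mount link are hypothetical, and the property names follow the standard fs.viewfs.mounttable convention.

    // Illustrative sketch only: ns1, ns2 and /data are hypothetical.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ViewHdfsQuickStart {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Route hdfs:// URIs through the mount-aware DFS implementation.
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.hdfs.ViewDistributedFileSystem");
        // One mount link plus a fallback pointing at the base cluster root.
        conf.set("fs.viewfs.mounttable.ns1.link./data", "hdfs://ns2/data");
        conf.set("fs.viewfs.mounttable.ns1.linkFallback", "hdfs://ns1/");

        FileSystem fs = FileSystem.get(URI.create("hdfs://ns1/"), conf);
        // Paths under /data resolve to ns2 via the mount table; all other
        // paths fall back to the ns1 cluster.
        try (FSDataOutputStream out = fs.create(new Path("/data/part-00000"))) {
          out.writeBytes("hello");
        }
      }
    }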
@Override + public boolean upgradeStatus() throws IOException { + if (this.vfs == null) { + return super.upgradeStatus(); + } + checkDefaultDFS(defaultDFS, "upgradeStatus"); + return defaultDFS.upgradeStatus(); + } + + @Override + public RollingUpgradeInfo rollingUpgrade( + HdfsConstants.RollingUpgradeAction action) throws IOException { + if (this.vfs == null) { + return super.rollingUpgrade(action); + } + checkDefaultDFS(defaultDFS, "rollingUpgrade"); + return defaultDFS.rollingUpgrade(action); + } + + @Override + public void metaSave(String pathname) throws IOException { + if (this.vfs == null) { + super.metaSave(pathname); + return; + } + checkDefaultDFS(defaultDFS, "metaSave"); + defaultDFS.metaSave(pathname); + } + + @Override + public FsServerDefaults getServerDefaults() throws IOException { + if (this.vfs == null) { + return super.getServerDefaults(); + } + checkDefaultDFS(defaultDFS, "getServerDefaults"); + //TODO: Need to revisit. + return defaultDFS.getServerDefaults(); + } + + @Override + public FileStatus getFileStatus(final Path f) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + return super.getFileStatus(f); + } + return this.vfs.getFileStatus(f); + } + + @SuppressWarnings("deprecation") + @Override + public void createSymlink(final Path target, final Path link, + final boolean createParent) throws IOException { + // Regular DFS behavior + if (this.vfs == null) { + super.createSymlink(target, link, createParent); + return; + } + + throw new UnsupportedOperationException( + "createSymlink is not supported in ViewHDFS"); + } + + @Override + public boolean supportsSymlinks() { + if (this.vfs == null) { + return super.supportsSymlinks(); + } + // we can enabled later if we want to support symlinks. 
+ return false; + } + + @Override + public FileStatus getFileLinkStatus(final Path f) throws IOException { + if (this.vfs == null) { + return super.getFileLinkStatus(f); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + return mountPathInfo.getTargetFs() + .getFileLinkStatus(mountPathInfo.getPathOnTarget()); + } + + @Override + public Path getLinkTarget(Path path) throws IOException { + if(this.vfs==null){ + return super.getLinkTarget(path); + } + return this.vfs.getLinkTarget(path); + } + + @Override + protected Path resolveLink(Path f) throws IOException { + if(this.vfs==null){ + return super.resolveLink(f); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(f, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "resolveLink"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .resolveLink(mountPathInfo.getPathOnTarget()); + } + + @Override + public FileChecksum getFileChecksum(final Path f) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + return super.getFileChecksum(f); + } + return this.vfs.getFileChecksum(f); + } + + @Override + public void setPermission(final Path f, final FsPermission permission) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + super.setPermission(f, permission); + return; + } + this.vfs.setPermission(f, permission); + } + + @Override + public void setOwner(final Path f, final String username, + final String groupname) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + super.setOwner(f, username, groupname); + return; + } + this.vfs.setOwner(f, username, groupname); + } + + @Override + public void setTimes(final Path f, final long mtime, final long atime) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + super.setTimes(f, mtime, atime); + return; + } + this.vfs.setTimes(f, mtime, atime); + } + + @Override + // DFS specific API + protected int getDefaultPort() { + return super.getDefaultPort(); + } + + /** + * If no mount points configured, it works same as + * {@link DistributedFileSystem#getDelegationToken(String)}. If + * there are mount points configured and if default fs(linkFallback) + * configured, then it will return default fs delegation token. Otherwise + * it will return null. 
+ */ + @Override + public Token getDelegationToken(String renewer) + throws IOException { + if (this.vfs == null) { + return super.getDelegationToken(renewer); + } + + if (defaultDFS != null) { + return defaultDFS.getDelegationToken(renewer); + } + return null; + } + + @Override + public void setBalancerBandwidth(long bandwidth) throws IOException { + if (this.vfs == null) { + super.setBalancerBandwidth(bandwidth); + return; + } + checkDefaultDFS(defaultDFS, "setBalancerBandwidth"); + defaultDFS.setBalancerBandwidth(bandwidth); + } + + @Override + public String getCanonicalServiceName() { + if (this.vfs == null) { + return super.getCanonicalServiceName(); + } + checkDefaultDFS(defaultDFS, "getCanonicalServiceName"); + return defaultDFS.getCanonicalServiceName(); + } + + @Override + protected URI canonicalizeUri(URI uri) { + if (this.vfs == null) { + return super.canonicalizeUri(uri); + } + + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = null; + try { + mountPathInfo = this.vfs.getMountPathInfo(new Path(uri), getConf()); + } catch (IOException e) { + LOGGER.warn("Failed to resolve the uri as mount path", e); + return null; + } + checkDFS(mountPathInfo.getTargetFs(), "canonicalizeUri"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .canonicalizeUri(uri); + } + + @Override + public boolean isInSafeMode() throws IOException { + if (this.vfs == null) { + return super.isInSafeMode(); + } + checkDefaultDFS(defaultDFS, "isInSafeMode"); + return defaultDFS.isInSafeMode(); + } + + @Override + // DFS specific API + public void allowSnapshot(Path path) throws IOException { + if (this.vfs == null) { + super.allowSnapshot(path); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "allowSnapshot"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .allowSnapshot(mountPathInfo.getPathOnTarget()); + } + + @Override + public void disallowSnapshot(final Path path) throws IOException { + if (this.vfs == null) { + super.disallowSnapshot(path); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "disallowSnapshot"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .disallowSnapshot(mountPathInfo.getPathOnTarget()); + } + + @Override + public Path createSnapshot(Path path, String snapshotName) + throws IOException { + if (this.vfs == null) { + return super.createSnapshot(path, snapshotName); + } + return this.vfs.createSnapshot(path, snapshotName); + } + + @Override + public void renameSnapshot(Path path, String snapshotOldName, + String snapshotNewName) throws IOException { + if (this.vfs == null) { + super.renameSnapshot(path, snapshotOldName, snapshotNewName); + return; + } + this.vfs.renameSnapshot(path, snapshotOldName, snapshotNewName); + } + + @Override + //Ony for HDFS users + public SnapshottableDirectoryStatus[] getSnapshottableDirListing() + throws IOException { + if (this.vfs == null) { + return super.getSnapshottableDirListing(); + } + checkDefaultDFS(defaultDFS, "getSnapshottableDirListing"); + return defaultDFS.getSnapshottableDirListing(); + } + + @Override + public void deleteSnapshot(Path path, String snapshotName) + throws IOException { + if (this.vfs == null) { + super.deleteSnapshot(path, snapshotName); + return; + } + this.vfs.deleteSnapshot(path, snapshotName); + } + + @Override + public RemoteIterator 
snapshotDiffReportListingRemoteIterator( + final Path snapshotDir, final String fromSnapshot, + final String toSnapshot) throws IOException { + if (this.vfs == null) { + return super + .snapshotDiffReportListingRemoteIterator(snapshotDir, fromSnapshot, + toSnapshot); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(snapshotDir, getConf()); + checkDFS(mountPathInfo.getTargetFs(), + "snapshotDiffReportListingRemoteIterator"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .snapshotDiffReportListingRemoteIterator( + mountPathInfo.getPathOnTarget(), fromSnapshot, toSnapshot); + } + + @Override + public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir, + final String fromSnapshot, final String toSnapshot) throws IOException { + if (this.vfs == null) { + return super.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(snapshotDir, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "getSnapshotDiffReport"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .getSnapshotDiffReport(mountPathInfo.getPathOnTarget(), fromSnapshot, + toSnapshot); + } + + @Override + public boolean isFileClosed(final Path src) throws IOException { + if (this.vfs == null) { + return super.isFileClosed(src); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(src, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "isFileClosed"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .isFileClosed(mountPathInfo.getPathOnTarget()); + } + + @Override + public long addCacheDirective(CacheDirectiveInfo info) throws IOException { + if (this.vfs == null) { + return super.addCacheDirective(info); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(info.getPath(), getConf()); + checkDFS(mountPathInfo.getTargetFs(), "addCacheDirective"); + + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .addCacheDirective(new CacheDirectiveInfo.Builder(info) + .setPath(mountPathInfo.getPathOnTarget()).build()); + } + + @Override + public long addCacheDirective(CacheDirectiveInfo info, + EnumSet flags) throws IOException { + if (this.vfs == null) { + return super.addCacheDirective(info, flags); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(info.getPath(), getConf()); + checkDFS(mountPathInfo.getTargetFs(), "addCacheDirective"); + + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .addCacheDirective(new CacheDirectiveInfo.Builder(info) + .setPath(mountPathInfo.getPathOnTarget()).build(), flags); + } + + @Override + public void modifyCacheDirective(CacheDirectiveInfo info) throws IOException { + if (this.vfs == null) { + super.modifyCacheDirective(info); + return; + } + if (info.getPath() != null) { + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(info.getPath(), getConf()); + checkDFS(mountPathInfo.getTargetFs(), "modifyCacheDirective"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .modifyCacheDirective(new CacheDirectiveInfo.Builder(info) + .setPath(mountPathInfo.getPathOnTarget()).build()); + return; + } + + // No path available in CacheDirectiveInfo, Let's shoot to all child fs. 
+ List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.modifyCacheDirective(info); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void modifyCacheDirective(CacheDirectiveInfo info, + EnumSet flags) throws IOException { + if (this.vfs == null) { + super.modifyCacheDirective(info, flags); + return; + } + if (info.getPath() != null) { + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(info.getPath(), getConf()); + checkDFS(mountPathInfo.getTargetFs(), "modifyCacheDirective"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .modifyCacheDirective(new CacheDirectiveInfo.Builder(info) + .setPath(mountPathInfo.getPathOnTarget()).build(), flags); + return; + } + // No path available in CacheDirectiveInfo, Let's shoot to all child fs. + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.modifyCacheDirective(info, flags); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void removeCacheDirective(long id) throws IOException { + if (this.vfs == null) { + super.removeCacheDirective(id); + return; + } + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.removeCacheDirective(id); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public RemoteIterator listCacheDirectives( + CacheDirectiveInfo filter) throws IOException { + if (this.vfs == null) { + return super.listCacheDirectives(filter); + } + + if (filter != null && filter.getPath() != null) { + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(filter.getPath(), getConf()); + checkDFS(mountPathInfo.getTargetFs(), "listCacheDirectives"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .listCacheDirectives(new CacheDirectiveInfo.Builder(filter) + .setPath(mountPathInfo.getPathOnTarget()).build()); + } + + // No path available in filter. Let's try to shoot to all child fs. 
+ final List> iters = new ArrayList<>(); + for (FileSystem fs : getChildFileSystems()) { + if (fs instanceof DistributedFileSystem) { + iters.add(((DistributedFileSystem) fs).listCacheDirectives(filter)); + } + } + if (iters.size() == 0) { + throw new UnsupportedOperationException( + "No DFS found in child fs. This API can't be supported in non DFS"); + } + + return new RemoteIterator() { + int currIdx = 0; + RemoteIterator currIter = iters.get(currIdx++); + + @Override + public boolean hasNext() throws IOException { + if (currIter.hasNext()) { + return true; + } + while (currIdx < iters.size()) { + currIter = iters.get(currIdx++); + if (currIter.hasNext()) { + return true; + } + } + return false; + } + + @Override + public CacheDirectiveEntry next() throws IOException { + if (hasNext()) { + return currIter.next(); + } + throw new NoSuchElementException("No more elements"); + } + }; + } + + //Currently Cache pool APIs supported only in default cluster. + @Override + public void addCachePool(CachePoolInfo info) throws IOException { + if (this.vfs == null) { + super.addCachePool(info); + return; + } + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.addCachePool(info); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void modifyCachePool(CachePoolInfo info) throws IOException { + if (this.vfs == null) { + super.modifyCachePool(info); + return; + } + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.modifyCachePool(info); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void removeCachePool(String poolName) throws IOException { + if (this.vfs == null) { + super.removeCachePool(poolName); + return; + } + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.removeCachePool(poolName); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public RemoteIterator listCachePools() throws IOException { + if (this.vfs == null) { + return super.listCachePools(); + } + + List childDFSs = new ArrayList<>(); + for (FileSystem fs : getChildFileSystems()) { + if (fs instanceof 
DistributedFileSystem) { + childDFSs.add((DistributedFileSystem) fs); + } + } + if (childDFSs.size() == 0) { + throw new UnsupportedOperationException( + "No DFS found in child fs. This API can't be supported in non DFS"); + } + return new RemoteIterator() { + int curDfsIdx = 0; + RemoteIterator currIter = + childDFSs.get(curDfsIdx++).listCachePools(); + + @Override + public boolean hasNext() throws IOException { + if (currIter.hasNext()) { + return true; + } + while (curDfsIdx < childDFSs.size()) { + currIter = childDFSs.get(curDfsIdx++).listCachePools(); + if (currIter.hasNext()) { + return true; + } + } + return false; + } + + @Override + public CachePoolEntry next() throws IOException { + if (hasNext()) { + return currIter.next(); + } + throw new java.util.NoSuchElementException("No more entries"); + } + }; + } + + @Override + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + if (this.vfs == null) { + super.modifyAclEntries(path, aclSpec); + return; + } + this.vfs.modifyAclEntries(path, aclSpec); + } + + @Override + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + if (this.vfs == null) { + super.removeAclEntries(path, aclSpec); + return; + } + this.vfs.removeAclEntries(path, aclSpec); + } + + @Override + public void removeDefaultAcl(Path path) throws IOException { + if (this.vfs == null) { + super.removeDefaultAcl(path); + return; + } + this.vfs.removeDefaultAcl(path); + } + + @Override + public void removeAcl(Path path) throws IOException { + if (this.vfs == null) { + super.removeAcl(path); + return; + } + this.vfs.removeAcl(path); + } + + @Override + public void setAcl(Path path, List aclSpec) throws IOException { + if (this.vfs == null) { + super.setAcl(path, aclSpec); + return; + } + this.vfs.setAcl(path, aclSpec); + } + + @Override + public AclStatus getAclStatus(Path path) throws IOException { + if (this.vfs == null) { + return super.getAclStatus(path); + } + return this.vfs.getAclStatus(path); + } + + @Override + public void createEncryptionZone(final Path path, final String keyName) + throws IOException { + if (this.vfs == null) { + super.createEncryptionZone(path, keyName); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "createEncryptionZone"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .createEncryptionZone(mountPathInfo.getPathOnTarget(), keyName); + } + + @Override + public EncryptionZone getEZForPath(final Path path) throws IOException { + if (this.vfs == null) { + return super.getEZForPath(path); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "getEZForPath"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .getEZForPath(mountPathInfo.getPathOnTarget()); + } + + /** + * Returns the results from default DFS (fallback). If you want the results + * from specific clusters, please invoke them on child fs instance directly. 
+ */ + @Override + public RemoteIterator listEncryptionZones() + throws IOException { + if (this.vfs == null) { + return super.listEncryptionZones(); + } + checkDefaultDFS(defaultDFS, "listEncryptionZones"); + return defaultDFS.listEncryptionZones(); + } + + @Override + public void reencryptEncryptionZone(final Path zone, + final HdfsConstants.ReencryptAction action) throws IOException { + if (this.vfs == null) { + super.reencryptEncryptionZone(zone, action); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(zone, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "reencryptEncryptionZone"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .reencryptEncryptionZone(mountPathInfo.getPathOnTarget(), action); + } + + /** + * Returns the results from default DFS (fallback). If you want the results + * from specific clusters, please invoke them on child fs instance directly. + */ + @Override + public RemoteIterator listReencryptionStatus() + throws IOException { + if (this.vfs == null) { + return super.listReencryptionStatus(); + } + checkDefaultDFS(defaultDFS, "listReencryptionStatus"); + return defaultDFS.listReencryptionStatus(); + } + + @Override + public FileEncryptionInfo getFileEncryptionInfo(final Path path) + throws IOException { + if (this.vfs == null) { + return super.getFileEncryptionInfo(path); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "getFileEncryptionInfo"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .getFileEncryptionInfo(mountPathInfo.getPathOnTarget()); + } + + @Override + public void provisionEZTrash(final Path path, + final FsPermission trashPermission) throws IOException { + if (this.vfs == null) { + super.provisionEZTrash(path, trashPermission); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "provisionEZTrash"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .provisionEZTrash(mountPathInfo.getPathOnTarget(), trashPermission); + } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + if (this.vfs == null) { + super.setXAttr(path, name, value, flag); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + mountPathInfo.getTargetFs() + .setXAttr(mountPathInfo.getPathOnTarget(), name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + if (this.vfs == null) { + return super.getXAttr(path, name); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + return mountPathInfo.getTargetFs() + .getXAttr(mountPathInfo.getPathOnTarget(), name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + if (this.vfs == null) { + return super.getXAttrs(path); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + return mountPathInfo.getTargetFs() + .getXAttrs(mountPathInfo.getPathOnTarget()); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + if (this.vfs == null) { + return super.getXAttrs(path, names); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + return 
mountPathInfo.getTargetFs() + .getXAttrs(mountPathInfo.getPathOnTarget(), names); + } + + @Override + public List listXAttrs(Path path) throws IOException { + if (this.vfs == null) { + return super.listXAttrs(path); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + return mountPathInfo.getTargetFs() + .listXAttrs(mountPathInfo.getPathOnTarget()); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + if (this.vfs == null) { + super.removeXAttr(path, name); + return; + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + mountPathInfo.getTargetFs() + .removeXAttr(mountPathInfo.getPathOnTarget(), name); + } + + @Override + public void access(Path path, FsAction mode) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + super.access(path, mode); + return; + } + this.vfs.access(path, mode); + } + + @Override + public URI getKeyProviderUri() throws IOException { + if (this.vfs == null) { + return super.getKeyProviderUri(); + } + checkDefaultDFS(defaultDFS, "getKeyProviderUri"); + return defaultDFS.getKeyProviderUri(); + } + + @Override + public KeyProvider getKeyProvider() throws IOException { + if (this.vfs == null) { + return super.getKeyProvider(); + } + checkDefaultDFS(defaultDFS, "getKeyProvider"); + return defaultDFS.getKeyProvider(); + } + + @Override + public DelegationTokenIssuer[] getAdditionalTokenIssuers() + throws IOException { + if (this.vfs == null) { + return super.getChildFileSystems(); + } + + return this.vfs.getChildFileSystems(); + } + + @Override + public DFSInotifyEventInputStream getInotifyEventStream() throws IOException { + if (this.vfs == null) { + return super.getInotifyEventStream(); + } + checkDefaultDFS(defaultDFS, "getInotifyEventStream"); + return defaultDFS.getInotifyEventStream(); + } + + @Override + public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) + throws IOException { + if (this.vfs == null) { + return super.getInotifyEventStream(); + } + checkDefaultDFS(defaultDFS, "getInotifyEventStream"); + return defaultDFS.getInotifyEventStream(); + } + + @Override + // DFS only API. + public void setErasureCodingPolicy(final Path path, final String ecPolicyName) + throws IOException { + if (this.vfs == null) { + super.setErasureCodingPolicy(path, ecPolicyName); + return; + } + + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "setErasureCodingPolicy"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .setErasureCodingPolicy(mountPathInfo.getPathOnTarget(), ecPolicyName); + } + + @Override + public void satisfyStoragePolicy(Path src) throws IOException { + if (this.vfs == null) { + super.satisfyStoragePolicy(src); + return; + } + this.vfs.satisfyStoragePolicy(src); + } + + @Override + public ErasureCodingPolicy getErasureCodingPolicy(final Path path) + throws IOException { + if (this.vfs == null) { + return super.getErasureCodingPolicy(path); + } + + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "getErasureCodingPolicy"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .getErasureCodingPolicy(mountPathInfo.getPathOnTarget()); + } + + /** + * Gets all erasure coding policies from all available child file systems. 
+ */ + @Override + public Collection getAllErasureCodingPolicies() + throws IOException { + if (this.vfs == null) { + return super.getAllErasureCodingPolicies(); + } + FileSystem[] childFss = getChildFileSystems(); + List results = new ArrayList<>(); + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + for (FileSystem fs : childFss) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + results.addAll(dfs.getAllErasureCodingPolicies()); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + return results; + } + + @Override + public Map getAllErasureCodingCodecs() throws IOException { + if (this.vfs == null) { + return super.getAllErasureCodingCodecs(); + } + FileSystem[] childFss = getChildFileSystems(); + Map results = new HashMap<>(); + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + for (FileSystem fs : childFss) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + results.putAll(dfs.getAllErasureCodingCodecs()); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + return results; + } + + @Override + public AddErasureCodingPolicyResponse[] addErasureCodingPolicies( + ErasureCodingPolicy[] policies) throws IOException { + if (this.vfs == null) { + return super.addErasureCodingPolicies(policies); + } + List failedExceptions = new ArrayList<>(); + List results = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + results.addAll(Arrays.asList(dfs.addErasureCodingPolicies(policies))); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + return results.toArray(new AddErasureCodingPolicyResponse[results.size()]); + } + + @Override + public void removeErasureCodingPolicy(String ecPolicyName) + throws IOException { + if (this.vfs == null) { + super.removeErasureCodingPolicy(ecPolicyName); + return; + } + + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.removeErasureCodingPolicy(ecPolicyName); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw 
MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void enableErasureCodingPolicy(String ecPolicyName) + throws IOException { + if (this.vfs == null) { + super.enableErasureCodingPolicy(ecPolicyName); + return; + } + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.enableErasureCodingPolicy(ecPolicyName); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void disableErasureCodingPolicy(String ecPolicyName) + throws IOException { + if (this.vfs == null) { + super.disableErasureCodingPolicy(ecPolicyName); + return; + } + List failedExceptions = new ArrayList<>(); + boolean isDFSExistsInChilds = false; + + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + isDFSExistsInChilds = true; + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + dfs.disableErasureCodingPolicy(ecPolicyName); + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (!isDFSExistsInChilds) { + throw new UnsupportedOperationException( + "No DFS available in child file systems."); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + } + + @Override + public void unsetErasureCodingPolicy(final Path path) throws IOException { + if (this.vfs == null) { + super.unsetErasureCodingPolicy(path); + return; + } + + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(path, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "unsetErasureCodingPolicy"); + ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .unsetErasureCodingPolicy(mountPathInfo.getPathOnTarget()); + } + + @Override + public ECTopologyVerifierResult getECTopologyResultForPolicies( + final String... policyNames) throws IOException { + if (this.vfs == null) { + return super.getECTopologyResultForPolicies(policyNames); + } + + List failedExceptions = new ArrayList<>(); + ECTopologyVerifierResult result = null; + for (FileSystem fs : getChildFileSystems()) { + if (!(fs instanceof DistributedFileSystem)) { + continue; + } + DistributedFileSystem dfs = (DistributedFileSystem) fs; + try { + result = dfs.getECTopologyResultForPolicies(policyNames); + if (!result.isSupported()) { + // whenever we see negative result. + return result; + } + } catch (IOException ioe) { + failedExceptions.add(ioe); + } + } + if (result == null) { + throw new UnsupportedOperationException( + "No DFS available in child filesystems"); + } + if (failedExceptions.size() > 0) { + throw MultipleIOException.createIOException(failedExceptions); + } + // Let's just return the last one. 
+ return result; + } + + @Override + public Path getTrashRoot(Path path) { + if (this.vfs == null) { + return super.getTrashRoot(path); + } + return this.vfs.getTrashRoot(path); + } + + @Override + public Collection getTrashRoots(boolean allUsers) { + if (this.vfs == null) { + return super.getTrashRoots(allUsers); + } + List trashRoots = new ArrayList<>(); + for (FileSystem fs : getChildFileSystems()) { + trashRoots.addAll(fs.getTrashRoots(allUsers)); + } + return trashRoots; + } + + // Just provided the same implementation as the default in DFS, as that's just + // delegated to the FileSystem parent class. + @Override + protected Path fixRelativePart(Path p) { + return super.fixRelativePart(p); + } + + Statistics getFsStatistics() { + if (this.vfs == null) { + return super.getFsStatistics(); + } + return statistics; + } + + DFSOpsCountStatistics getDFSOpsCountStatistics() { + if (this.vfs == null) { + return super.getDFSOpsCountStatistics(); + } + return defaultDFS.getDFSOpsCountStatistics(); + } + + @Override + // Works only for HDFS + public HdfsDataOutputStreamBuilder createFile(Path path) { + if (this.vfs == null) { + return super.createFile(path); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = null; + try { + mountPathInfo = this.vfs.getMountPathInfo(path, getConf()); + } catch (IOException e) { + // TODO: can we return null here? + return null; + } + checkDFS(mountPathInfo.getTargetFs(), "createFile"); + return (HdfsDataOutputStreamBuilder) mountPathInfo.getTargetFs() + .createFile(mountPathInfo.getPathOnTarget()); + } + + @Deprecated + @Override + public RemoteIterator listOpenFiles() throws IOException { + if (this.vfs == null) { + return super.listOpenFiles(); + } + checkDefaultDFS(defaultDFS, "listOpenFiles"); + return defaultDFS.listOpenFiles(); + } + + @Deprecated + @Override + public RemoteIterator listOpenFiles( + EnumSet openFilesTypes) + throws IOException { + if (this.vfs == null) { + return super.listOpenFiles(openFilesTypes); + } + checkDefaultDFS(defaultDFS, "listOpenFiles"); + return defaultDFS.listOpenFiles(openFilesTypes); + } + + @Override + public RemoteIterator listOpenFiles( + EnumSet openFilesTypes, String path) + throws IOException { + if (this.vfs == null) { + return super.listOpenFiles(openFilesTypes, path); + } + Path absF = fixRelativePart(new Path(path)); + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = + this.vfs.getMountPathInfo(absF, getConf()); + checkDFS(mountPathInfo.getTargetFs(), "listOpenFiles"); + return ((DistributedFileSystem) mountPathInfo.getTargetFs()) + .listOpenFiles(openFilesTypes, + mountPathInfo.getPathOnTarget().toString()); + } + + @Override + public HdfsDataOutputStreamBuilder appendFile(Path path) { + if (this.vfs == null) { + return super.appendFile(path); + } + ViewFileSystemOverloadScheme.MountPathInfo mountPathInfo = null; + try { + mountPathInfo = this.vfs.getMountPathInfo(path, getConf()); + } catch (IOException e) { + LOGGER.warn("Failed to resolve the path as mount path", e); + return null; + } + checkDFS(mountPathInfo.getTargetFs(), "appendFile"); + return (HdfsDataOutputStreamBuilder) mountPathInfo.getTargetFs() + .appendFile(mountPathInfo.getPathOnTarget()); + } + + @Override + public boolean hasPathCapability(Path path, String capability) + throws IOException { + if (this.vfs == null) { + return super.hasPathCapability(path, capability); + } + return this.vfs.hasPathCapability(path, capability); + } + + // The APIs implemented below exist in ViewFS but not in DFS.
+ @Override + public Path resolvePath(final Path f) throws IOException { + if (this.vfs == null) { + return super.resolvePath(f); + } + return this.vfs.resolvePath(f); + } + + @Override + @SuppressWarnings("deprecation") + public boolean delete(final Path f) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + return super.delete(f); + } + return this.vfs.delete(f); + } + + @Override + public FileChecksum getFileChecksum(final Path f, final long length) + throws AccessControlException, FileNotFoundException, IOException { + if (this.vfs == null) { + return super.getFileChecksum(f, length); + } + return this.vfs.getFileChecksum(f, length); + } + + @Override + public boolean mkdirs(Path dir) throws IOException { + if (this.vfs == null) { + return super.mkdirs(dir); + } + return this.vfs.mkdirs(dir); + } + + @Override + public long getDefaultBlockSize(Path f) { + if (this.vfs == null) { + return super.getDefaultBlockSize(f); + } + return this.vfs.getDefaultBlockSize(f); + } + + @Override + public short getDefaultReplication(Path f) { + if (this.vfs == null) { + return super.getDefaultReplication(f); + } + return this.vfs.getDefaultReplication(f); + } + + @Override + public FsServerDefaults getServerDefaults(Path f) throws IOException { + if (this.vfs == null) { + return super.getServerDefaults(f); + } + return this.vfs.getServerDefaults(f); + } + + @Override + public void setWriteChecksum(final boolean writeChecksum) { + if (this.vfs == null) { + super.setWriteChecksum(writeChecksum); + return; + } + this.vfs.setWriteChecksum(writeChecksum); + } + + @Override + public FileSystem[] getChildFileSystems() { + if (this.vfs == null) { + return super.getChildFileSystems(); + } + return this.vfs.getChildFileSystems(); + } + + public ViewFileSystem.MountPoint[] getMountPoints() { + if (this.vfs == null) { + return null; + } + return this.vfs.getMountPoints(); + } + + @Override + public FsStatus getStatus() throws IOException { + if (this.vfs == null) { + return super.getStatus(); + } + return this.vfs.getStatus(); + } + + @Override + public long getUsed() throws IOException { + if (this.vfs == null) { + return super.getUsed(); + } + return this.vfs.getUsed(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java index 0bb995553bc7f..f6e32c40e80fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java @@ -26,9 +26,9 @@ import org.apache.hadoop.fs.XAttr.NameSpace; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @InterfaceAudience.Private public class XAttrHelper { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java index 6cad69a46c4e8..30e7e00653bcc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java @@ -47,6 +47,7 @@ public static Optional hasPathCapability(final Path path, case CommonPathCapabilities.FS_CHECKSUMS: case CommonPathCapabilities.FS_CONCAT: case CommonPathCapabilities.FS_LIST_CORRUPT_FILE_BLOCKS: + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: case CommonPathCapabilities.FS_PATHHANDLES: case CommonPathCapabilities.FS_PERMISSIONS: case CommonPathCapabilities.FS_SNAPSHOTS: diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java index 407462c6e757d..aef31023d5130 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java @@ -108,6 +108,9 @@ public interface HdfsClientConfigKeys { String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = "dfs.client.use.legacy.blockreader.local"; boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false; + String DFS_CLIENT_READ_USE_CACHE_PRIORITY = + "dfs.client.read.use.cache.priority"; + boolean DFS_CLIENT_READ_USE_CACHE_PRIORITY_DEFAULT = false; String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = "dfs.client.datanode-restart.timeout"; long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30; @@ -141,6 +144,8 @@ public interface HdfsClientConfigKeys { "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms"; int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT = 60000; + String DFS_CLIENT_SHORT_CIRCUIT_NUM = "dfs.client.short.circuit.num"; + int DFS_CLIENT_SHORT_CIRCUIT_NUM_DEFAULT = 1; String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY = "dfs.client.slow.io.warning.threshold.ms"; long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000; @@ -156,13 +161,9 @@ public interface HdfsClientConfigKeys { "dfs.client.deadnode.detection.enabled"; boolean DFS_CLIENT_DEAD_NODE_DETECTION_ENABLED_DEFAULT = false; - String DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY = - "dfs.client.deadnode.detection.deadnode.queue.max"; - int DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_DEFAULT = 100; - - String DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY = - "dfs.client.deadnode.detection.suspectnode.queue.max"; - int DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_DEFAULT = 1000; + String DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY = + "dfs.client.deadnode.detection.idle.sleep.ms"; + long DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_DEFAULT = 10000; String DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY = "dfs.client.deadnode.detection.probe.connection.timeout.ms"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java index 02e5deb499914..5a615bbd62de4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; 
import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The Hdfs implementation of {@link FSDataInputStream}. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java index d7331299ef3e6..8af3417ca9fdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSOutputStream; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The Hdfs implementation of {@link FSDataOutputStream}. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java index 2109e6e1b7ecf..f9fd2b1ec1670 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java @@ -27,10 +27,11 @@ import java.io.IOException; import java.lang.reflect.Constructor; import java.net.InetSocketAddress; +import java.nio.channels.UnresolvedAddressException; import java.util.List; -import com.google.common.io.ByteArrayDataOutput; -import com.google.common.io.ByteStreams; +import org.apache.hadoop.thirdparty.com.google.common.io.ByteArrayDataOutput; +import org.apache.hadoop.thirdparty.com.google.common.io.ByteStreams; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -73,8 +74,8 @@ import org.apache.hadoop.util.PerformanceAdvisory; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -475,7 +476,8 @@ private BlockReader getBlockReaderLocal() throws IOException { "giving up on BlockReaderLocal.", this, pathInfo); return null; } - ShortCircuitCache cache = clientContext.getShortCircuitCache(); + ShortCircuitCache cache = + clientContext.getShortCircuitCache(block.getBlockId()); ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId()); ShortCircuitReplicaInfo info = cache.fetchOrCreate(key, this); @@ -526,7 +528,8 @@ public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() { if (curPeer.fromCache) remainingCacheTries--; DomainPeer peer = (DomainPeer)curPeer.peer; Slot slot = null; - ShortCircuitCache cache = clientContext.getShortCircuitCache(); + ShortCircuitCache cache = + clientContext.getShortCircuitCache(block.getBlockId()); try { MutableBoolean usedPeer = new MutableBoolean(false); slot = cache.allocShmSlot(datanode, peer, usedPeer, @@ -581,7 +584,8 @@ public ShortCircuitReplicaInfo 
createShortCircuitReplicaInfo() { */ private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer, Slot slot) throws IOException { - ShortCircuitCache cache = clientContext.getShortCircuitCache(); + ShortCircuitCache cache = + clientContext.getShortCircuitCache(block.getBlockId()); final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(peer.getOutputStream(), SMALL_BUFFER_SIZE)); SlotId slotId = slot == null ? null : slot.getSlotId(); @@ -823,7 +827,7 @@ private BlockReaderPeer nextTcpPeer() throws IOException { datanode); LOG.trace("nextTcpPeer: created newConnectedPeer {}", peer); return new BlockReaderPeer(peer, false); - } catch (IOException e) { + } catch (IOException | UnresolvedAddressException e) { LOG.trace("nextTcpPeer: failed to create newConnectedPeer connected to" + "{}", datanode); throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java index 9c1ef461b0433..e7ddb98e700e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.client.impl; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java index 0f2f54c28e331..f25f50cf05411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java @@ -51,7 +51,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java index 07f0eee8dd5ad..7105322cd5c4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.client.impl; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -60,6 +60,8 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_READ_USE_CACHE_PRIORITY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_READ_USE_CACHE_PRIORITY_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT; @@ -140,6 +142,7 @@ public class DfsClientConf { private final long refreshReadBlockLocationsMS; private final ShortCircuitConf shortCircuitConf; + private final int clientShortCircuitNum; private final long hedgedReadThresholdMillis; private final int hedgedReadThreadpoolSize; @@ -150,6 +153,8 @@ public class DfsClientConf { private final boolean dataTransferTcpNoDelay; + private final boolean readUseCachePriority; + private final boolean deadNodeDetectionEnabled; private final long leaseHardLimitPeriod; @@ -260,14 +265,14 @@ public DfsClientConf(Configuration conf) { slowIoWarningThresholdMs = conf.getLong( DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY, DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT); + readUseCachePriority = conf.getBoolean(DFS_CLIENT_READ_USE_CACHE_PRIORITY, + DFS_CLIENT_READ_USE_CACHE_PRIORITY_DEFAULT); refreshReadBlockLocationsMS = conf.getLong( HdfsClientConfigKeys.DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_MS_KEY, HdfsClientConfigKeys. 
DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_MS_DEFAULT); - shortCircuitConf = new ShortCircuitConf(conf); - hedgedReadThresholdMillis = conf.getLong( HedgedRead.THRESHOLD_MILLIS_KEY, HedgedRead.THRESHOLD_MILLIS_DEFAULT); @@ -290,6 +295,17 @@ public DfsClientConf(Configuration conf) { leaseHardLimitPeriod = conf.getLong(HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_KEY, HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000; + + shortCircuitConf = new ShortCircuitConf(conf); + clientShortCircuitNum = conf.getInt( + HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM, + HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM_DEFAULT); + Preconditions.checkArgument(clientShortCircuitNum >= 1, + HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM + + "can't be less then 1."); + Preconditions.checkArgument(clientShortCircuitNum <= 5, + HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM + + "can't be more then 5."); } @SuppressWarnings("unchecked") @@ -595,6 +611,13 @@ public long getSlowIoWarningThresholdMs() { return slowIoWarningThresholdMs; } + /* + * @return the clientShortCircuitNum + */ + public int getClientShortCircuitNum() { + return clientShortCircuitNum; + } + /** * @return the hedgedReadThresholdMillis */ @@ -630,6 +653,13 @@ public long getleaseHardLimitPeriod() { return leaseHardLimitPeriod; } + /** + * @return the readUseCachePriority + */ + public boolean isReadUseCachePriority() { + return readUseCachePriority; + } + /** * @return the replicaAccessorBuilderClasses */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java index 8a040f21b8c03..d108af987cc8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/SnapshotDiffReportGenerator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/SnapshotDiffReportGenerator.java index 4dbe98858f5d5..89e2e41352a17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/SnapshotDiffReportGenerator.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/SnapshotDiffReportGenerator.java @@ -19,7 +19,7 @@ import java.util.*; -import com.google.common.primitives.SignedBytes; +import org.apache.hadoop.thirdparty.com.google.common.primitives.SignedBytes; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java index 61b497e29ca44..83bfb8b9ce5c8 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.client.impl.metrics; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java index edb81e9adb7ce..7bd3f969972c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java @@ -23,7 +23,7 @@ import java.util.LinkedList; import java.util.List; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java index e80f12aa0ee1c..3e5a4431372ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.DFSUtilClient; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java index 917457f974c38..db536901283c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.ipc.RemoteException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.htrace.core.TraceScope; import org.apache.htrace.core.Tracer; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java index 0278fc92d2e2f..2cb16879c3f6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.net.InetSocketAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java index 3559ab97d8fe1..02c4f9a1f21b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java index c8a2722621020..48b581dfe6acb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java index c3e0ac585ebfd..d96c7892b76ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.protocol; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ipc.RemoteException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java index 29f1b6da6b270..f2d8135ab4dee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java @@ -22,14 +22,14 @@ import java.util.Comparator; import 
java.util.List; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.security.token.Token; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Associates a block with the Datanodes that contain its replicas @@ -158,7 +158,7 @@ public ExtendedBlock getBlock() { * {@link org.apache.hadoop.hdfs.protocol.LocatedBlock#updateCachedStorageInfo} * to update the cached Storage ID/Type arrays. */ - public DatanodeInfo[] getLocations() { + public DatanodeInfoWithStorage[] getLocations() { return locs; } @@ -268,6 +268,7 @@ public String toString() { + "; corrupt=" + corrupt + "; offset=" + offset + "; locs=" + Arrays.asList(locs) + + "; cachedLocs=" + Arrays.asList(cachedLocs) + "}"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java index dabeceacc3e3c..5d7b91343f239 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.protocol; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java index 7bc95c9350521..e6f20c9ce1b82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.Path; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.hdfs.DFSUtilClient; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java index 79fd543c3be90..74329bc1e8a12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java @@ -20,7 +20,7 @@ import java.util.Collections; import java.util.List; -import 
com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.DFSUtilClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java index c7628d8dd91df..10884f27f90a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java index 486909466474c..cc958e35df116 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java @@ -27,9 +27,9 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto; import org.apache.hadoop.hdfs.util.ByteBufferOutputStream; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Shorts; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Shorts; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java index 6b717ecdfbff2..dc6d590ce630d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java @@ -28,8 +28,8 @@ import org.apache.hadoop.util.DirectBufferPool; import org.apache.hadoop.io.IOUtils; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java index 6992477496754..b58fbb8992a47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java @@ -24,7 +24,7 
@@ import java.io.OutputStream; import java.util.ArrayList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index 8d6e318168b3d..94ae40098b561 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -57,10 +57,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; import org.apache.hadoop.thirdparty.protobuf.ByteString; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index acd1e505cbbc0..86053ee32746a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_NEW_QOP_KEY; import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.DataOutputStream; @@ -64,8 +64,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Negotiates SASL for DataTransferProtocol on behalf of a client. 
There are diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 4028b0e8fb245..47234e8b65d78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus; import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -181,7 +181,7 @@ static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); return RPC.getProxy(ClientDatanodeProtocolPB.class, RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), addr, ticket, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 572c65b67b21d..6cdce59d1d825 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -24,7 +24,7 @@ import java.util.HashMap; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -237,7 +237,7 @@ import org.apache.hadoop.io.retry.AsyncCallHandler; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -456,7 +456,7 @@ public void setPermission(String src, FsPermission permission) private void setAsyncReturnValue() { final AsyncGet asyncReturnMessage - = ProtobufRpcEngine.getAsyncReturnMessage(); + = ProtobufRpcEngine2.getAsyncReturnMessage(); final AsyncGet asyncGet = new AsyncGet() { @Override @@ -609,7 +609,8 @@ public void rename2(String src, String dst, Rename... 
options) for (Rename option : options) { if (option == Rename.OVERWRITE) { overwrite = true; - } else if (option == Rename.TO_TRASH) { + } + if (option == Rename.TO_TRASH) { toTrash = true; } } @@ -1569,7 +1570,7 @@ public AclStatus getAclStatus(String src) throws IOException { if (Client.isAsynchronousMode()) { rpcProxy.getAclStatus(null, req); final AsyncGet asyncReturnMessage - = ProtobufRpcEngine.getAsyncReturnMessage(); + = ProtobufRpcEngine2.getAsyncReturnMessage(); final AsyncGet asyncGet = new AsyncGet() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 9fc302464271d..b3932f908fffc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -28,12 +28,12 @@ import java.util.Map; import java.util.Set; -import com.google.common.base.Preconditions; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.collect.Lists; -import com.google.common.primitives.Shorts; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Shorts; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java index 5165887ece5f3..ce8a89b84acce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolTranslatorPB.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ListReconfigurablePropertiesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -84,7 +84,7 @@ static ReconfigurationProtocolPB createReconfigurationProtocolProxy( InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, ReconfigurationProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); return RPC.getProxy(ReconfigurationProtocolPB.class, RPC.getProtocolVersion(ReconfigurationProtocolPB.class), addr, ticket, conf, factory, socketTimeout); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java index a48c401927406..e387ca7d843d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ReconfigurationProtocolUtils.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusConfigChangeProto; import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetReconfigurationStatusResponseProto; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * This is a client side utility class that handles diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java index 1d395ec24ac72..4b2e6cd989707 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java @@ -27,7 +27,7 @@ import java.util.EnumSet; import java.util.Optional; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java index d753174e71bcc..dcbf6c8f0b92c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java @@ -33,7 +33,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A delegation token identifier that is specific to HDFS. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java index 2d1cfc135521f..c343d00e61371 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java @@ -33,7 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.DataChecksum; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.InvalidChecksumSizeException; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java index 505273ec9790f..130e8c1c9c728 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java index af34c49dd12bb..7e3954c562a58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.SerializationFeature; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java index 6b99a7f7ce23b..9cabeb9037fc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java @@ -50,7 +50,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A {@link org.apache.hadoop.io.retry.FailoverProxyProvider} implementation diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java index 496389a1ddc87..95078a7412d74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowPeerReports.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowPeerReports.java index 218e30df4d2dc..a3b3445d5a743 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowPeerReports.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowPeerReports.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.protocol; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java index 81cc68da07295..7bf768935d9d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java @@ -26,7 +26,7 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.DomainSocketWatcher; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * DfsClientShm is a subclass of ShortCircuitShm which is used by the diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index 6b3d8e07ce082..4bd2b9bbe0cff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -43,8 +43,8 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.DomainSocketWatcher; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java index 760e920c232b5..40436395887a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java @@ -21,7 +21,7 @@ import java.net.InetSocketAddress; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.hdfs.DFSUtilClient; @@ -30,9 +30,9 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.util.PerformanceAdvisory; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index d4d898c892662..40e687c703637 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -53,9 +53,9 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Waitable; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java index 14116e2fdc1c6..86218aa0fcd8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java @@ -31,8 +31,8 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index b9fcadae529b3..1cb123bb58f3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -39,9 +39,9 @@ import sun.misc.Unsafe; -import com.google.common.base.Preconditions; -import com.google.common.collect.ComparisonChain; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import javax.annotation.Nonnull; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java index 164b03bf203a3..059280e494678 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.Time; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java index 012d70895021d..460c611ef88eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.DFSStripedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java index 31de804d74ee9..c83ccf9d738ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java @@ -31,8 +31,8 @@ import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.FSInputStream; -import 
com.google.common.annotations.VisibleForTesting; -import com.google.common.net.HttpHeaders; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders; import javax.annotation.Nonnull; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java index e846b56200993..87932cc38952d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java @@ -19,9 +19,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java index 870103eddf021..dfc754b3a819e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class implements the aspects that relate to delegation tokens for all diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java index 8b6c7f7cfd02a..589afb4604d3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Utilities for handling URLs diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index d0b10cbbcf813..ca59cf396ee11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -76,10 +76,12 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.GlobalStorageStatistics; import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider; +import 
org.apache.hadoop.fs.MultipartUploaderBuilder; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.PathCapabilities; import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.impl.FileSystemMultipartUploaderBuilder; import org.apache.hadoop.fs.permission.FsCreateModes; import org.apache.hadoop.hdfs.DFSOpsCountStatistics; import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType; @@ -132,10 +134,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; @@ -152,6 +154,7 @@ public class WebHdfsFileSystem extends FileSystem + "/v" + VERSION; public static final String EZ_HEADER = "X-Hadoop-Accept-EZ"; public static final String FEFINFO_HEADER = "X-Hadoop-feInfo"; + public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy"; /** * Default connection factory may be overridden in tests to use smaller @@ -181,6 +184,7 @@ public class WebHdfsFileSystem extends FileSystem private DFSOpsCountStatistics storageStatistics; private KeyProvider testProvider; + private boolean isTLSKrb; /** * Return the protocol scheme for the FileSystem. @@ -242,6 +246,7 @@ public synchronized void initialize(URI uri, Configuration conf .newDefaultURLConnectionFactory(connectTimeout, readTimeout, conf); } + this.isTLSKrb = "HTTPS_ONLY".equals(conf.get(DFS_HTTP_POLICY_KEY)); ugi = UserGroupInformation.getCurrentUser(); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); @@ -699,6 +704,11 @@ protected HttpURLConnection connect(URL url) throws IOException { //redirect hostname and port redirectHost = null; + if (url.getProtocol().equals("http") && + UserGroupInformation.isSecurityEnabled() && + isTLSKrb) { + throw new IOException("Access denied: dfs.http.policy is HTTPS_ONLY."); + } // resolve redirects for a DN operation unless already resolved if (op.getRedirect() && !redirected) { @@ -2109,6 +2119,12 @@ public boolean hasPathCapability(final Path path, final String capability) return super.hasPathCapability(p, capability); } + @Override + public MultipartUploaderBuilder createMultipartUploader(final Path basePath) + throws IOException { + return new FileSystemMultipartUploaderBuilder(this, basePath); + } + /** * This class is used for opening, reading, and seeking files while using the * WebHdfsFileSystem. 
This class will invoke the retry policy when performing diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java index 9ab3ad57fa8e2..5419219b6effc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java @@ -24,7 +24,7 @@ import java.util.List; import java.util.regex.Pattern; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.permission.AclEntry; /** AclPermission parameter. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java index 19efb9d1818c5..0ebafe747fc28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.text.MessageFormat; import java.util.regex.Pattern; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index b153fd9924381..0000000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
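With the MultipartUploaderFactory service registration removed, WebHDFS multipart uploads go through the createMultipartUploader(Path) hook added to WebHdfsFileSystem above. A minimal sketch of that call path, assuming an illustrative webhdfs:// endpoint and target directory (neither taken from the patch), could look like this:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.MultipartUploader;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsMultipartSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical endpoint; any FileSystem that overrides createMultipartUploader
        // hands back its own builder, for WebHDFS the FileSystemMultipartUploaderBuilder
        // wired in above.
        FileSystem fs = FileSystem.get(new URI("webhdfs://namenode.example.com:9870"), conf);
        MultipartUploader uploader =
            fs.createMultipartUploader(new Path("/tmp/multipart-target")).build();
        // startUpload / putPart / complete calls would follow; omitted in this sketch.
      }
    }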
- -org.apache.hadoop.hdfs.DFSMultipartUploaderFactory diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java index b24df2bfce929..3f1ff8826957e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.collect.HashMultiset; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultiset; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.net.unix.DomainSocket; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java index f73ea6d24712f..1ffec85e02b8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.client.impl; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java index e3f34e3c66954..f28bfa65b59f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java @@ -23,6 +23,7 @@ import org.apache.hadoop.net.MockDomainNameResolver; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Time; import org.junit.Before; import org.junit.BeforeClass; @@ -292,12 +293,22 @@ private void testResolveDomainNameUsingDNS(boolean useFQDN) throws Exception { MockDomainNameResolver.FQDN_2 : "/" + MockDomainNameResolver.ADDR_2; // Check we got the proper addresses assertEquals(2, proxyResults.size()); - assertTrue( - "nn1 wasn't returned: " + proxyResults, - proxyResults.containsKey(resolvedHost1 + ":8020")); - assertTrue( - "nn2 wasn't returned: " + proxyResults, - proxyResults.containsKey(resolvedHost2 + ":8020")); + if (Shell.isJavaVersionAtLeast(14) && useFQDN) { + // JDK-8225499. The string format of unresolved address has been changed. 
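        // To make the difference concrete (hostname is illustrative, not the
        // MockDomainNameResolver constant): a key that reads "nn1.example.com:8020"
        // on earlier JDKs comes back as "nn1.example.com/:8020" on JDK 14 and later,
        // which is why the two branches below assert different expected keys.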
+ assertTrue( + "nn1 wasn't returned: " + proxyResults, + proxyResults.containsKey(resolvedHost1 + "/:8020")); + assertTrue( + "nn2 wasn't returned: " + proxyResults, + proxyResults.containsKey(resolvedHost2 + "/:8020")); + } else { + assertTrue( + "nn1 wasn't returned: " + proxyResults, + proxyResults.containsKey(resolvedHost1 + ":8020")); + assertTrue( + "nn2 wasn't returned: " + proxyResults, + proxyResults.containsKey(resolvedHost2 + ":8020")); + } // Check that the Namenodes were invoked assertEquals(NUM_ITERATIONS, nn1Count.get() + nn2Count.get()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 5704ef0bd4c03..23e7b74469c8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -56,7 +56,7 @@ import static org.mockito.Mockito.when; import static org.mockito.Mockito.mock; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestRequestHedgingProxyProvider { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java index 6c426f575b91c..010d7c5870c54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java @@ -34,7 +34,7 @@ import java.net.HttpURLConnection; import java.net.URL; -import com.google.common.net.HttpHeaders; +import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders; import org.apache.hadoop.hdfs.web.ByteRangeInputStream.InputStreamAndFileLength; import org.apache.hadoop.test.Whitebox; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index 2be8bf4336279..108ce50420640 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -33,7 +33,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.LoggerFactory; public final class TestURLConnectionFactory { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java index bd12204118694..af17d0b1e5ee3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java @@ -31,7 +31,7 @@ import org.junit.After; import 
org.junit.Before; import org.junit.Test; -import org.mockserver.client.server.MockServerClient; +import org.mockserver.client.MockServerClient; import org.mockserver.integration.ClientAndServer; import org.mockserver.model.Header; import org.mockserver.model.HttpRequest; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java index 935cd9eeb665b..9ae7ff88fb6b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java @@ -24,7 +24,7 @@ import org.apache.hadoop.util.Timer; import org.apache.http.HttpStatus; import org.junit.Test; -import org.mockserver.client.server.MockServerClient; +import org.mockserver.client.MockServerClient; import org.mockserver.integration.ClientAndServer; import org.mockserver.model.Header; import org.mockserver.model.HttpRequest; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java index d9996bc4f2f0e..3ef105ca246c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java @@ -23,7 +23,7 @@ import org.apache.hadoop.util.Timer; import org.apache.http.HttpStatus; import org.junit.Test; -import org.mockserver.client.server.MockServerClient; +import org.mockserver.client.MockServerClient; import org.mockserver.integration.ClientAndServer; import org.mockserver.model.Header; import org.mockserver.model.HttpRequest; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index eac05832fcd7c..4d14b4dacc63c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -22,11 +22,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-hdfs-httpfs - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop HttpFS @@ -72,8 +72,8 @@ javax.servlet-api - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava compile @@ -118,10 +118,6 @@ org.eclipse.jetty servlet-api-2.5 - - com.amazonaws - aws-java-sdk-s3 - org.eclipse.jdt core @@ -153,10 +149,6 @@ org.eclipse.jetty servlet-api-2.5 - - com.amazonaws - aws-java-sdk-s3 - org.eclipse.jdt core @@ -281,7 +273,7 @@ - javadoc + javadoc-no-fork site @@ -345,8 +337,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 17227598844a9..cb8468dc58edd 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -23,7 +23,7 @@ import java.util.EnumSet; import java.util.List; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonPathCapabilities; @@ -70,9 +70,9 @@ import org.json.simple.parser.JSONParser; import org.json.simple.parser.ParseException; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; @@ -197,6 +197,7 @@ public static FILE_TYPE getType(FileStatus fileStatus) { public static final String XATTR_VALUE_JSON = "value"; public static final String XATTRNAMES_JSON = "XAttrNames"; public static final String ECPOLICY_JSON = "ecPolicyObj"; + public static final String SYMLINK_JSON = "symlink"; public static final String FILE_CHECKSUM_JSON = "FileChecksum"; public static final String CHECKSUM_ALGORITHM_JSON = "algorithm"; @@ -1093,6 +1094,9 @@ private FileStatus createFileStatus(Path parent, JSONObject json) { String pathSuffix = (String) json.get(PATH_SUFFIX_JSON); Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix); FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON)); + String symLinkValue = + type == FILE_TYPE.SYMLINK ? (String) json.get(SYMLINK_JSON) : null; + Path symLink = symLinkValue == null ? 
null : new Path(symLinkValue); long len = (Long) json.get(LENGTH_JSON); String owner = (String) json.get(OWNER_JSON); String group = (String) json.get(GROUP_JSON); @@ -1117,11 +1121,12 @@ private FileStatus createFileStatus(Path parent, JSONObject json) { new FsPermissionExtension(permission, aBit, eBit, ecBit); FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type, replication, blockSize, mTime, aTime, deprecatedPerm, owner, group, - null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit)); + symLink, path, FileStatus.attributes(aBit, eBit, ecBit, seBit)); return fileStatus; } else { return new FileStatus(len, FILE_TYPE.DIRECTORY == type, - replication, blockSize, mTime, aTime, permission, owner, group, path); + replication, blockSize, mTime, aTime, permission, owner, group, + symLink, path); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java index 0f8ea071e2d3f..b2e9a8470d2c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.http.server; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; @@ -33,6 +34,7 @@ import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.http.client.HttpFSFileSystem; +import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -45,7 +47,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.web.JsonUtil; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.lib.service.FileSystemAccess; import org.apache.hadoop.util.StringUtils; import org.json.simple.JSONArray; @@ -71,7 +72,22 @@ * FileSystem operation executors used by {@link HttpFSServer}. */ @InterfaceAudience.Private -public class FSOperations { +public final class FSOperations { + + private static int bufferSize = 4096; + + private FSOperations() { + // not called + } + /** + * Set the buffer size. The size is set during the initialization of + * HttpFSServerWebApp. + * @param conf the configuration to get the bufferSize + */ + public static void setBufferSize(Configuration conf) { + bufferSize = conf.getInt(HTTPFS_BUFFER_SIZE_KEY, + HTTP_BUFFER_SIZE_DEFAULT); + } /** * @param fileStatus a FileStatus object @@ -110,8 +126,17 @@ private static Map toJsonInner(FileStatus fileStatus, Map json = new LinkedHashMap(); json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? 
"" : fileStatus.getPath().getName()); - json.put(HttpFSFileSystem.TYPE_JSON, - HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString()); + FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus); + json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString()); + if (fileType.equals(FILE_TYPE.SYMLINK)) { + // put the symlink into Json + try { + json.put(HttpFSFileSystem.SYMLINK_JSON, + fileStatus.getSymlink().getName()); + } catch (IOException e) { + // Can't happen. + } + } json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen()); json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner()); json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup()); @@ -425,10 +450,9 @@ public FSAppend(InputStream is, String path) { */ @Override public Void execute(FileSystem fs) throws IOException { - int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096); OutputStream os = fs.append(path, bufferSize); - IOUtils.copyBytes(is, os, bufferSize, true); - os.close(); + long bytes = copyBytes(is, os); + HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes); return null; } @@ -511,6 +535,7 @@ public FSTruncate(String path, long newLength) { @Override public JSONObject execute(FileSystem fs) throws IOException { boolean result = fs.truncate(path, newLength); + HttpFSServerWebApp.get().getMetrics().incrOpsTruncate(); return toJSON( StringUtils.toLowerCase(HttpFSFileSystem.TRUNCATE_JSON), result); } @@ -627,16 +652,65 @@ public Void execute(FileSystem fs) throws IOException { fsPermission = FsCreateModes.create(fsPermission, new FsPermission(unmaskedPermission)); } - int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY, - HTTP_BUFFER_SIZE_DEFAULT); OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null); - IOUtils.copyBytes(is, os, bufferSize, true); - os.close(); + long bytes = copyBytes(is, os); + HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes); return null; } } + /** + * These copyBytes methods combines the two different flavors used originally. + * One with length and another one with buffer size. + * In this impl, buffer size is determined internally, which is a singleton + * normally set during initialization. + * @param in the inputStream + * @param out the outputStream + * @return the totalBytes + * @throws IOException the exception to be thrown. + */ + public static long copyBytes(InputStream in, OutputStream out) + throws IOException { + return copyBytes(in, out, Long.MAX_VALUE); + } + + public static long copyBytes(InputStream in, OutputStream out, long count) + throws IOException { + long totalBytes = 0; + + // If bufferSize is not initialized use 4k. This will not happen + // if all callers check and set it. + byte[] buf = new byte[bufferSize]; + long bytesRemaining = count; + int bytesRead; + + try { + while (bytesRemaining > 0) { + int bytesToRead = (int) + (bytesRemaining < buf.length ? bytesRemaining : buf.length); + + bytesRead = in.read(buf, 0, bytesToRead); + if (bytesRead == -1) { + break; + } + + out.write(buf, 0, bytesRead); + bytesRemaining -= bytesRead; + totalBytes += bytesRead; + } + return totalBytes; + } finally { + // Originally IOUtils.copyBytes() were called with close=true. So we are + // implementing the same behavior here. + try { + in.close(); + } finally { + out.close(); + } + } + } + /** * Executor that performs a delete FileSystemAccess files system operation. 
*/ @@ -669,6 +743,7 @@ public FSDelete(String path, boolean recursive) { @Override public JSONObject execute(FileSystem fs) throws IOException { boolean deleted = fs.delete(path, recursive); + HttpFSServerWebApp.get().getMetrics().incrOpsDelete(); return toJSON( StringUtils.toLowerCase(HttpFSFileSystem.DELETE_JSON), deleted); } @@ -737,6 +812,7 @@ public FSFileStatus(String path) { @Override public Map execute(FileSystem fs) throws IOException { FileStatus status = fs.getFileStatus(path); + HttpFSServerWebApp.get().getMetrics().incrOpsStat(); return toJson(status); } @@ -765,7 +841,6 @@ public JSONObject execute(FileSystem fs) throws IOException { json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath()); return json; } - } /** @@ -803,6 +878,7 @@ public FSListStatus(String path, String filter) throws IOException { @Override public Map execute(FileSystem fs) throws IOException { FileStatus[] fileStatuses = fs.listStatus(path, filter); + HttpFSServerWebApp.get().getMetrics().incrOpsListing(); return toJson(fileStatuses, fs.getFileStatus(path).isFile()); } @@ -894,6 +970,7 @@ public JSONObject execute(FileSystem fs) throws IOException { new FsPermission(unmaskedPermission)); } boolean mkdirs = fs.mkdirs(path, fsPermission); + HttpFSServerWebApp.get().getMetrics().incrOpsMkdir(); return toJSON(HttpFSFileSystem.MKDIRS_JSON, mkdirs); } @@ -926,8 +1003,8 @@ public FSOpen(String path) { */ @Override public InputStream execute(FileSystem fs) throws IOException { - int bufferSize = HttpFSServerWebApp.get().getConfig().getInt( - HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT); + // Only updating ops count. bytesRead is updated in InputStreamEntity + HttpFSServerWebApp.get().getMetrics().incrOpsOpen(); return fs.open(path, bufferSize); } @@ -965,6 +1042,7 @@ public FSRename(String path, String toPath) { @Override public JSONObject execute(FileSystem fs) throws IOException { boolean renamed = fs.rename(path, toPath); + HttpFSServerWebApp.get().getMetrics().incrOpsRename(); return toJSON(HttpFSFileSystem.RENAME_JSON, renamed); } @@ -1896,6 +1974,7 @@ public Void execute(FileSystem fs) throws IOException { if (fs instanceof DistributedFileSystem) { DistributedFileSystem dfs = (DistributedFileSystem) fs; dfs.access(path, mode); + HttpFSServerWebApp.get().getMetrics().incrOpsCheckAccess(); } else { throw new UnsupportedOperationException("checkaccess is " + "not supported for HttpFs on " + fs.getClass() diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java index 8d301827364cf..4739e42137ccb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java @@ -70,12 +70,16 @@ public Response toResponse(Throwable throwable) { status = Response.Status.NOT_FOUND; } else if (throwable instanceof IOException) { status = Response.Status.INTERNAL_SERVER_ERROR; + logErrorFully(status, throwable); } else if (throwable instanceof UnsupportedOperationException) { status = Response.Status.BAD_REQUEST; + logErrorFully(status, throwable); } else if (throwable instanceof IllegalArgumentException) { status = Response.Status.BAD_REQUEST; + logErrorFully(status, throwable); } else { status = Response.Status.INTERNAL_SERVER_ERROR; + 
logErrorFully(status, throwable); } return createResponse(status, throwable); } @@ -95,4 +99,7 @@ protected void log(Response.Status status, Throwable throwable) { LOG.warn("[{}:{}] response [{}] {}", method, path, status, message, throwable); } + private void logErrorFully(Response.Status status, Throwable throwable) { + LOG.debug("Failed with {}", status, throwable); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 3cca83eac53a0..5965f7082fa73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.http.server; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -106,8 +106,38 @@ @Path(HttpFSFileSystem.SERVICE_VERSION) @InterfaceAudience.Private public class HttpFSServer { + + enum AccessMode { + READWRITE, WRITEONLY, READONLY; + } private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit"); private static final Logger LOG = LoggerFactory.getLogger(HttpFSServer.class); + AccessMode accessMode = AccessMode.READWRITE; + + public HttpFSServer() { + Configuration conf = HttpFSServerWebApp.get().getConfig(); + final String accessModeString = conf.get("httpfs.access.mode", "read-write").toLowerCase(); + if(accessModeString.compareTo("write-only") == 0) + accessMode = AccessMode.WRITEONLY; + else if(accessModeString.compareTo("read-only") == 0) + accessMode = AccessMode.READONLY; + else + accessMode = AccessMode.READWRITE; + } + + + // First try getting a user through HttpUserGroupInformation. This will return + // if the built-in hadoop auth filter is not used. Fall back to getting the + // authenticated user from the request. 
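  // (Illustration of the two branches: behind the built-in HttpFS auth filter,
  //  HttpUserGroupInformation.get() returns the filter-established UGI and is used
  //  directly; behind a filter that only populates request.getUserPrincipal(), the
  //  createRemoteUser(...) fallback below builds the UGI from that principal name.)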
+ private UserGroupInformation getHttpUGI(HttpServletRequest request) { + UserGroupInformation user = HttpUserGroupInformation.get(); + if (user != null) { + return user; + } + + return UserGroupInformation.createRemoteUser(request.getUserPrincipal().getName()); + } + /** * Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective @@ -218,6 +248,12 @@ public Response get(@PathParam("path") String path, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException { + // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode + if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) && + (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) && + accessMode == AccessMode.WRITEONLY) { + return Response.status(Response.Status.FORBIDDEN).build(); + } UserGroupInformation user = HttpUserGroupInformation.get(); Response response; path = makeAbsolute(path); @@ -248,7 +284,7 @@ public InputStream run() throws Exception { } }); } catch (InterruptedException ie) { - LOG.info("Open interrupted.", ie); + LOG.warn("Open interrupted.", ie); Thread.currentThread().interrupt(); } Long offset = params.get(OffsetParam.NAME, OffsetParam.class); @@ -281,7 +317,7 @@ public InputStream run() throws Exception { enforceRootPath(op.value(), path); FSOperations.FSHomeDir command = new FSOperations.FSHomeDir(); JSONObject json = fsExecute(user, command); - AUDIT_LOG.info(""); + AUDIT_LOG.info("Home Directory for [{}]", user); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } @@ -303,7 +339,7 @@ public InputStream run() throws Exception { FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path); Map json = fsExecute(user, command); - AUDIT_LOG.info("[{}]", path); + AUDIT_LOG.info("Content summary for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } @@ -311,7 +347,7 @@ public InputStream run() throws Exception { FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path); Map json = fsExecute(user, command); - AUDIT_LOG.info("[{}]", path); + AUDIT_LOG.info("Quota Usage for [{}]", path); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } @@ -490,6 +526,10 @@ public Response delete(@PathParam("path") String path, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException { + // Do not allow DELETE commands in read-only mode + if(accessMode == AccessMode.READONLY) { + return Response.status(Response.Status.FORBIDDEN).build(); + } UserGroupInformation user = HttpUserGroupInformation.get(); Response response; path = makeAbsolute(path); @@ -577,6 +617,10 @@ public Response post(InputStream is, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException { + // Do not allow POST commands in read-only mode + if(accessMode == AccessMode.READONLY) { + return Response.status(Response.Status.FORBIDDEN).build(); + } UserGroupInformation user = HttpUserGroupInformation.get(); Response response; path = makeAbsolute(path); @@ -585,35 +629,30 @@ public Response post(InputStream is, switch (op.value()) { case APPEND: { Boolean hasData = params.get(DataParam.NAME, DataParam.class); - if (!hasData) { - URI redirectURL = createUploadRedirectionURL( - uriInfo, HttpFSFileSystem.Operation.APPEND); - Boolean noRedirect = params.get( - NoRedirectParam.NAME, NoRedirectParam.class); - if (noRedirect) { + URI 
redirectURL = createUploadRedirectionURL(uriInfo, + HttpFSFileSystem.Operation.APPEND); + Boolean noRedirect = + params.get(NoRedirectParam.NAME, NoRedirectParam.class); + if (noRedirect) { final String js = JsonUtil.toJsonString("Location", redirectURL); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); - } else { - response = Response.temporaryRedirect(redirectURL).build(); - } - } else { + } else if (hasData) { FSOperations.FSAppend command = new FSOperations.FSAppend(is, path); fsExecute(user, command); AUDIT_LOG.info("[{}]", path); response = Response.ok().type(MediaType.APPLICATION_JSON).build(); + } else { + response = Response.temporaryRedirect(redirectURL).build(); } break; } case CONCAT: { - System.out.println("HTTPFS SERVER CONCAT"); String sources = params.get(SourcesParam.NAME, SourcesParam.class); - FSOperations.FSConcat command = new FSOperations.FSConcat(path, sources.split(",")); fsExecute(user, command); AUDIT_LOG.info("[{}]", path); - System.out.println("SENT RESPONSE"); response = Response.ok().build(); break; } @@ -662,7 +701,8 @@ public Response post(InputStream is, protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum uploadOperation) { UriBuilder uriBuilder = uriInfo.getRequestUriBuilder(); uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME, uploadOperation). - queryParam(DataParam.NAME, Boolean.TRUE); + queryParam(DataParam.NAME, Boolean.TRUE) + .replaceQueryParam(NoRedirectParam.NAME, (Object[]) null); return uriBuilder.build(null); } @@ -718,6 +758,10 @@ public Response put(InputStream is, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException { + // Do not allow PUT commands in read-only mode + if(accessMode == AccessMode.READONLY) { + return Response.status(Response.Status.FORBIDDEN).build(); + } UserGroupInformation user = HttpUserGroupInformation.get(); Response response; path = makeAbsolute(path); @@ -726,18 +770,14 @@ public Response put(InputStream is, switch (op.value()) { case CREATE: { Boolean hasData = params.get(DataParam.NAME, DataParam.class); - if (!hasData) { - URI redirectURL = createUploadRedirectionURL( - uriInfo, HttpFSFileSystem.Operation.CREATE); - Boolean noRedirect = params.get( - NoRedirectParam.NAME, NoRedirectParam.class); - if (noRedirect) { + URI redirectURL = createUploadRedirectionURL(uriInfo, + HttpFSFileSystem.Operation.CREATE); + Boolean noRedirect = + params.get(NoRedirectParam.NAME, NoRedirectParam.class); + if (noRedirect) { final String js = JsonUtil.toJsonString("Location", redirectURL); response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); - } else { - response = Response.temporaryRedirect(redirectURL).build(); - } - } else { + } else if (hasData) { Short permission = params.get(PermissionParam.NAME, PermissionParam.class); Short unmaskedPermission = params.get(UnmaskedPermissionParam.NAME, @@ -761,6 +801,8 @@ public Response put(InputStream is, "Location", uriInfo.getAbsolutePath()); response = Response.created(uriInfo.getAbsolutePath()) .type(MediaType.APPLICATION_JSON).entity(js).build(); + } else { + response = Response.temporaryRedirect(redirectURL).build(); } break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java index 66438b5f4ab41..fd60186950763 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java @@ -21,9 +21,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.http.server.metrics.HttpFSServerMetrics; import org.apache.hadoop.lib.server.ServerException; import org.apache.hadoop.lib.service.FileSystemAccess; import org.apache.hadoop.lib.servlet.ServerWebApp; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.util.JvmPauseMonitor; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +60,7 @@ public class HttpFSServerWebApp extends ServerWebApp { public static final String CONF_ADMIN_GROUP = "admin.group"; private static HttpFSServerWebApp SERVER; + private static HttpFSServerMetrics metrics; private String adminGroup; @@ -102,6 +107,7 @@ public void init() throws ServerException { LOG.info("Connects to Namenode [{}]", get().get(FileSystemAccess.class).getFileSystemConfiguration(). get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); + setMetrics(getConfig()); } /** @@ -110,9 +116,22 @@ public void init() throws ServerException { @Override public void destroy() { SERVER = null; + if (metrics != null) { + metrics.shutdown(); + } super.destroy(); } + private static void setMetrics(Configuration config) { + LOG.info("Initializing HttpFSServerMetrics"); + metrics = HttpFSServerMetrics.create(config, "HttpFSServer"); + JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(); + pauseMonitor.init(config); + pauseMonitor.start(); + metrics.getJvmMetrics().setPauseMonitor(pauseMonitor); + FSOperations.setBufferSize(config); + DefaultMetricsSystem.initialize("HttpFSServer"); + } /** * Returns HttpFSServer server singleton, configuration and services are * accessible through it. @@ -123,6 +142,14 @@ public static HttpFSServerWebApp get() { return SERVER; } + /** + * gets the HttpFSServerMetrics instance. + * @return the HttpFSServerMetrics singleton. + */ + public static HttpFSServerMetrics getMetrics() { + return metrics; + } + /** * Returns HttpFSServer admin group. * diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java new file mode 100644 index 0000000000000..524ec09290a9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.http.server.metrics; + +import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.source.JvmMetrics; + +import java.util.concurrent.ThreadLocalRandom; + +/** + * + * This class is for maintaining the various HttpFSServer statistics + * and publishing them through the metrics interfaces. + * This also registers the JMX MBean for RPC. + *

    + * This class has a number of metrics variables that are publicly accessible; + * these variables (objects) have methods to update their values; + * for example: + *

    {@link #bytesRead}.inc() + * + */ +@InterfaceAudience.Private +@Metrics(about="HttpFSServer metrics", context="httpfs") +public class HttpFSServerMetrics { + + private @Metric MutableCounterLong bytesWritten; + private @Metric MutableCounterLong bytesRead; + + // Write ops + private @Metric MutableCounterLong opsCreate; + private @Metric MutableCounterLong opsAppend; + private @Metric MutableCounterLong opsTruncate; + private @Metric MutableCounterLong opsDelete; + private @Metric MutableCounterLong opsRename; + private @Metric MutableCounterLong opsMkdir; + + // Read ops + private @Metric MutableCounterLong opsOpen; + private @Metric MutableCounterLong opsListing; + private @Metric MutableCounterLong opsStat; + private @Metric MutableCounterLong opsCheckAccess; + + private final MetricsRegistry registry = new MetricsRegistry("httpfsserver"); + private final String name; + private JvmMetrics jvmMetrics = null; + + public HttpFSServerMetrics(String name, String sessionId, + final JvmMetrics jvmMetrics) { + this.name = name; + this.jvmMetrics = jvmMetrics; + registry.tag(SessionId, sessionId); + } + + public static HttpFSServerMetrics create(Configuration conf, + String serverName) { + String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); + MetricsSystem ms = DefaultMetricsSystem.instance(); + JvmMetrics jm = JvmMetrics.create("HttpFSServer", sessionId, ms); + String name = "ServerActivity-"+ (serverName.isEmpty() + ? "UndefinedServer"+ ThreadLocalRandom.current().nextInt() + : serverName.replace(':', '-')); + + return ms.register(name, null, new HttpFSServerMetrics(name, + sessionId, jm)); + } + + public String name() { + return name; + } + + public JvmMetrics getJvmMetrics() { + return jvmMetrics; + } + + public void incrBytesWritten(long bytes) { + bytesWritten.incr(bytes); + } + + public void incrBytesRead(long bytes) { + bytesRead.incr(bytes); + } + + public void incrOpsCreate() { + opsCreate.incr(); + } + + public void incrOpsAppend() { + opsAppend.incr(); + } + + public void incrOpsTruncate() { + opsTruncate.incr(); + } + + public void incrOpsDelete() { + opsDelete.incr(); + } + + public void incrOpsRename() { + opsRename.incr(); + } + + public void incrOpsMkdir() { + opsMkdir.incr(); + } + + public void incrOpsOpen() { + opsOpen.incr(); + } + + public void incrOpsListing() { + opsListing.incr(); + } + + public void incrOpsStat() { + opsStat.incr(); + } + + public void incrOpsCheckAccess() { + opsCheckAccess.incr(); + } + + public void shutdown() { + DefaultMetricsSystem.shutdown(); + } + + public long getOpsMkdir() { + return opsMkdir.value(); + } + + public long getOpsListing() { + return opsListing.value(); + } + + public long getOpsStat() { + return opsStat.value(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/package-info.java new file mode 100644 index 0000000000000..47e8d4a4c2fb2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/package-info.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A package to implement metrics for the HttpFS Server. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +package org.apache.hadoop.fs.http.server.metrics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java index b2bba088911b3..81208bc6e43ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java @@ -194,9 +194,11 @@ protected void init() throws ServiceException { throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex); } - LOG.debug("FileSystemAccess FileSystem configuration:"); - for (Map.Entry entry : serviceHadoopConf) { - LOG.debug(" {} = {}", entry.getKey(), entry.getValue()); + if (LOG.isDebugEnabled()) { + LOG.debug("FileSystemAccess FileSystem configuration:"); + for (Map.Entry entry : serviceHadoopConf) { + LOG.debug(" {} = {}", entry.getKey(), entry.getValue()); + } } setRequiredServiceHadoopConf(serviceHadoopConf); @@ -262,7 +264,7 @@ public void run() { LOG.warn("Error while purging filesystem, " + ex.toString(), ex); } } - LOG.debug("Purged [{}} filesystem instances", count); + LOG.debug("Purged [{}] filesystem instances", count); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java index 5e855de7eec85..985feed09981c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.lib.servlet; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.lib.server.Server; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java index 9edb24a7bcbc0..5f387c908506e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java @@ -19,6 +19,9 @@ package org.apache.hadoop.lib.wsrs; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.http.server.FSOperations; +import org.apache.hadoop.fs.http.server.HttpFSServerWebApp; +import org.apache.hadoop.fs.http.server.metrics.HttpFSServerMetrics; import org.apache.hadoop.io.IOUtils; import javax.ws.rs.core.StreamingOutput; @@ -45,10 +48,17 @@ public InputStreamEntity(InputStream is) { @Override public void write(OutputStream os) throws IOException { IOUtils.skipFully(is, offset); + long bytes = 0L; if (len == -1) { - IOUtils.copyBytes(is, os, 4096, true); + // Use the configured buffer size instead of hardcoding to 4k + bytes = FSOperations.copyBytes(is, os); } else { - IOUtils.copyBytes(is, os, len, true); + bytes = FSOperations.copyBytes(is, os, len); + } + // Update metrics. + HttpFSServerMetrics metrics = HttpFSServerWebApp.get().getMetrics(); + if (metrics != null) { + metrics.incrBytesRead(bytes); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java index e0f62002c70d4..c171e929ca6d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java @@ -19,7 +19,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.util.List; import java.util.Map; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java index c93f8f2b17db1..56a999f519cdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java @@ -18,7 +18,7 @@ package org.apache.hadoop.lib.wsrs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.api.core.HttpContext; import com.sun.jersey.core.spi.component.ComponentContext; import com.sun.jersey.core.spi.component.ComponentScope; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml index 5b8e469e96bc7..869e4e53e05ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml @@ -56,7 +56,7 @@ hadoop.http.idle_timeout.ms - 1000 + 60000 Httpfs Server connection timeout in milliseconds. @@ -323,4 +323,15 @@ + + httpfs.access.mode + read-write + + Sets the access mode for HTTPFS. If access is not allowed the FORBIDDED (403) is returned. + Valid access modes are: + read-write Full Access allowed + write-only PUT POST and DELETE full Access. 
GET only allows GETFILESTATUS and LISTSTATUS + read-only GET Full Access PUT POST and DELETE are FORBIDDEN + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java index 902861d3bd10f..53b9b7ab3b7ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java @@ -73,7 +73,7 @@ import org.eclipse.jetty.server.Server; import org.eclipse.jetty.webapp.WebAppContext; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.File; import java.io.FileOutputStream; @@ -364,6 +364,42 @@ private void testDelete() throws Exception { fs.close(); } + private void testListSymLinkStatus() throws Exception { + if (isLocalFS()) { + // do not test the the symlink for local FS. + return; + } + FileSystem fs = FileSystem.get(getProxiedFSConf()); + boolean isWebhdfs = fs instanceof WebHdfsFileSystem; + Path path = + new Path(getProxiedFSTestDir() + "-symlink", "targetFoo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + Path linkPath = + new Path(getProxiedFSTestDir()+ "-symlink", "symlinkFoo.txt"); + fs.createSymlink(path, linkPath, false); + fs = getHttpFSFileSystem(); + FileStatus linkStatus = fs.getFileStatus(linkPath); + FileStatus status1 = fs.getFileStatus(path); + + FileStatus[] stati = fs.listStatus(path.getParent()); + assertEquals(2, stati.length); + + int countSymlink = 0; + for (int i = 0; i < stati.length; i++) { + FileStatus fStatus = stati[i]; + countSymlink += fStatus.isSymlink() ? 1 : 0; + } + assertEquals(1, countSymlink); + + assertFalse(status1.isSymlink()); + if (isWebhdfs) { + assertTrue(linkStatus.isSymlink()); + } + fs.close(); + } + private void testListStatus() throws Exception { FileSystem fs = FileSystem.get(getProxiedFSConf()); boolean isDFS = fs instanceof DistributedFileSystem; @@ -1179,6 +1215,7 @@ private void operation(Operation op) throws Exception { break; case LIST_STATUS: testListStatus(); + testListSymLinkStatus(); break; case WORKING_DIRECTORY: testWorkingdirectory(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSAccessControlled.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSAccessControlled.java new file mode 100644 index 0000000000000..47d99352de1b2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSAccessControlled.java @@ -0,0 +1,355 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.HadoopUsersConfTestHelper; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.test.TestJetty; +import org.apache.hadoop.test.TestJettyHelper; +import org.junit.Assert; +import org.junit.Test; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.webapp.WebAppContext; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.io.OutputStream; +import java.io.Writer; +import java.net.HttpURLConnection; +import java.net.URL; +import java.text.MessageFormat; + +/** + * This test class ensures that everything works as expected when + * support with the access controlled HTTPFS file system. + */ +public class TestHttpFSAccessControlled extends HTestCase { + + private MiniDFSCluster miniDfs; + private Configuration nnConf; + + /** + * Fire up our own hand-rolled MiniDFSCluster. We do this here instead + * of relying on TestHdfsHelper because we don't want to turn on ACL + * support. + * + * @throws Exception + */ + private void startMiniDFS() throws Exception { + + File testDirRoot = TestDirHelper.getTestDir(); + + if (System.getProperty("hadoop.log.dir") == null) { + System.setProperty("hadoop." + + "log.dir", + new File(testDirRoot, "hadoop-log").getAbsolutePath()); + } + if (System.getProperty("test.build.data") == null) { + System.setProperty("test.build.data", + new File(testDirRoot, "hadoop-data").getAbsolutePath()); + } + + Configuration conf = HadoopUsersConfTestHelper.getBaseConf(); + HadoopUsersConfTestHelper.addUserConf(conf); + conf.set("fs.hdfs.impl.disable.cache", "true"); + conf.set("dfs.block.access.token.enable", "false"); + conf.set("dfs.permissions", "true"); + conf.set("hadoop.security.authentication", "simple"); + + MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); + builder.numDataNodes(2); + miniDfs = builder.build(); + nnConf = miniDfs.getConfiguration(0); + } + + /** + * Create an HttpFS Server to talk to the MiniDFSCluster we created. 
+ * @throws Exception + */ + private void createHttpFSServer() throws Exception { + File homeDir = TestDirHelper.getTestDir(); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath()); + + File secretFile = new File(new File(homeDir, "conf"), "secret"); + Writer w = new FileWriter(secretFile); + w.write("secret"); + w.close(); + + // HDFS configuration + File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf"); + if ( !hadoopConfDir.mkdirs() ) { + throw new IOException(); + } + + String fsDefaultName = + nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); + Configuration conf = new Configuration(false); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName); + + File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml"); + OutputStream os = new FileOutputStream(hdfsSite); + conf.writeXml(os); + os.close(); + + // HTTPFS configuration + conf = new Configuration(false); + conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString()); + conf.set("httpfs.proxyuser." + + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", + HadoopUsersConfTestHelper.getHadoopProxyUserGroups()); + conf.set("httpfs.proxyuser." + + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", + HadoopUsersConfTestHelper.getHadoopProxyUserHosts()); + conf.set("httpfs.authentication.signature.secret.file", + secretFile.getAbsolutePath()); + + File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); + os = new FileOutputStream(httpfsSite); + conf.writeXml(os); + os.close(); + + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + URL url = cl.getResource("webapp"); + if ( url == null ) { + throw new IOException(); + } + WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs"); + Server server = TestJettyHelper.getJettyServer(); + server.setHandler(context); + server.start(); + } + + /** + * Talks to the http interface to get the json output of a *STATUS command + * on the given file. + * + * @param filename The file to query. + * @param message Failure message + * @param command Command to test + * @param expectOK Is this operation expected to succeed? + * @throws Exception + */ + private void getCmd(String filename, String message, String command, boolean expectOK) + throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + String outMsg = message + " (" + command + ")"; + // Remove leading / from filename + if ( filename.charAt(0) == '/' ) { + filename = filename.substring(1); + } + String pathOps = MessageFormat.format( + "/webhdfs/v1/{0}?user.name={1}&op={2}", + filename, user, command); + URL url = new URL(TestJettyHelper.getJettyURL(), pathOps); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("GET"); + conn.connect(); + int resp = conn.getResponseCode(); + if ( expectOK ) { + Assert.assertEquals( outMsg, HttpURLConnection.HTTP_OK, resp); + } else { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp); + } + } + + /** + * General-purpose http PUT command to the httpfs server. + * @param filename The file to operate upon + * @param message Failure message + * @param command The command to perform (SETPERMISSION, etc) + * @param params Parameters to command + * @param expectOK Is this operation expected to succeed? 
+ */ + private void putCmd(String filename, String message, String command, + String params, boolean expectOK) throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + String outMsg = message + " (" + command + ")"; + // Remove leading / from filename + if ( filename.charAt(0) == '/' ) { + filename = filename.substring(1); + } + String pathOps = MessageFormat.format( + "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}", + filename, user, (params == null) ? "" : "&", + (params == null) ? "" : params, command); + URL url = new URL(TestJettyHelper.getJettyURL(), pathOps); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("PUT"); + conn.connect(); + int resp = conn.getResponseCode(); + if ( expectOK ) { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_OK, resp); + } else { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp); + } + } + + /** + * General-purpose http PUT command to the httpfs server. + * @param filename The file to operate upon + * @param message Failure message + * @param command The command to perform (SETPERMISSION, etc) + * @param params Parameters to command + * @param expectOK Is this operation expected to succeed? + */ + private void deleteCmd(String filename, String message, String command, + String params, boolean expectOK) throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + String outMsg = message + " (" + command + ")"; + // Remove leading / from filename + if ( filename.charAt(0) == '/' ) { + filename = filename.substring(1); + } + String pathOps = MessageFormat.format( + "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}", + filename, user, (params == null) ? "" : "&", + (params == null) ? "" : params, command); + URL url = new URL(TestJettyHelper.getJettyURL(), pathOps); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("DELETE"); + conn.connect(); + int resp = conn.getResponseCode(); + if ( expectOK ) { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_OK, resp); + } else { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp); + } + } + + /** + * General-purpose http POST command to the httpfs server. + * @param filename The file to operate upon + * @param message Failure message + * @param command The command to perform (UNSETSTORAGEPOLICY, etc) + * @param params Parameters to command" + * @param expectOK Is this operation expected to succeed? + */ + private void postCmd(String filename, String message, String command, + String params, boolean expectOK) throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + String outMsg = message + " (" + command + ")"; + // Remove leading / from filename + if ( filename.charAt(0) == '/' ) { + filename = filename.substring(1); + } + String pathOps = MessageFormat.format( + "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}", + filename, user, (params == null) ? "" : "&", + (params == null) ? "" : params, command); + URL url = new URL(TestJettyHelper.getJettyURL(), pathOps); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("POST"); + conn.connect(); + int resp = conn.getResponseCode(); + if ( expectOK ) { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_OK, resp); + } else { + Assert.assertEquals(outMsg, HttpURLConnection.HTTP_FORBIDDEN, resp); + } + } + + /** + * Ensure that + *

+ * 1. GETFILESTATUS (GET) and LISTSTATUS (GET) work in all modes
+ * 2. GETXATTRS (GET) works in read-write and read-only but write-only throws an exception
+ * 3. SETPERMISSION (PUT) works in read-write and write-only but read-only throws an exception
+ * 4. UNSETSTORAGEPOLICY (POST) works in read-write and write-only but read-only throws an exception
+ * 5. DELETE (DELETE) works in read-write and write-only but read-only throws an exception
    + * + * @throws Exception + */ + @Test + @TestDir + @TestJetty + public void testAcessControlledFS() throws Exception { + final String testRwMsg = "Test read-write "; + final String testRoMsg = "Test read-only "; + final String testWoMsg = "Test write-only "; + final String defUser1 = "default:user:glarch:r-x"; + final String dir = "/testAccess"; + final String pathRW = dir + "/foo-rw"; + final String pathWO = dir + "/foo-wo"; + final String pathRO = dir + "/foo-ro"; + final String setPermSpec = "744"; + final String snapshopSpec = "snapshotname=test-snap"; + startMiniDFS(); + createHttpFSServer(); + + FileSystem fs = FileSystem.get(nnConf); + fs.mkdirs(new Path(dir)); + OutputStream os = fs.create(new Path(pathRW)); + os.write(1); + os.close(); + + os = fs.create(new Path(pathWO)); + os.write(1); + os.close(); + + os = fs.create(new Path(pathRO)); + os.write(1); + os.close(); + + Configuration conf = HttpFSServerWebApp.get().getConfig(); + + /* test Read-Write Mode */ + conf.setStrings("httpfs.access.mode", "read-write"); + getCmd(pathRW, testRwMsg + "GET", "GETFILESTATUS", true); + getCmd(pathRW, testRwMsg + "GET", "LISTSTATUS", true); + getCmd(pathRW, testRwMsg + "GET", "GETXATTRS", true); + putCmd(pathRW, testRwMsg + "PUT", "SETPERMISSION", setPermSpec, true); + postCmd(pathRW, testRwMsg + "POST", "UNSETSTORAGEPOLICY", null, true); + deleteCmd(pathRW, testRwMsg + "DELETE", "DELETE", null, true); + + /* test Write-Only Mode */ + conf.setStrings("httpfs.access.mode", "write-only"); + getCmd(pathWO, testWoMsg + "GET", "GETFILESTATUS", true); + getCmd(pathWO, testWoMsg + "GET", "LISTSTATUS", true); + getCmd(pathWO, testWoMsg + "GET", "GETXATTRS", false); + putCmd(pathWO, testWoMsg + "PUT", "SETPERMISSION", setPermSpec, true); + postCmd(pathWO, testWoMsg + "POST", "UNSETSTORAGEPOLICY", null, true); + deleteCmd(pathWO, testWoMsg + "DELETE", "DELETE", null, true); + + /* test Read-Only Mode */ + conf.setStrings("httpfs.access.mode", "read-only"); + getCmd(pathRO, testRoMsg + "GET", "GETFILESTATUS", true); + getCmd(pathRO, testRoMsg + "GET", "LISTSTATUS", true); + getCmd(pathRO, testRoMsg + "GET", "GETXATTRS", true); + putCmd(pathRO, testRoMsg + "PUT", "SETPERMISSION", setPermSpec, false); + postCmd(pathRO, testRoMsg + "POST", "UNSETSTORAGEPOLICY", null, false); + deleteCmd(pathRO, testRoMsg + "DELETE", "DELETE", null, false); + + conf.setStrings("httpfs.access.mode", "read-write"); + + miniDfs.shutdown(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index da7080599860e..ebb118e52f817 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -60,6 +60,7 @@ import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -100,8 +101,9 @@ import org.eclipse.jetty.server.Server; import org.eclipse.jetty.webapp.WebAppContext; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import java.util.Properties; +import java.util.concurrent.Callable; import java.util.regex.Pattern; import javax.ws.rs.HttpMethod; @@ -114,6 +116,23 @@ */ public class 
TestHttpFSServer extends HFSTestCase { + /** + * define metric getters for unit tests. + */ + private static Callable defaultEntryMetricGetter = () -> 0L; + private static Callable defaultExitMetricGetter = () -> 1L; + private static HashMap> metricsGetter = + new HashMap>() { + { + put("LISTSTATUS", + () -> HttpFSServerWebApp.get().getMetrics().getOpsListing()); + put("MKDIRS", + () -> HttpFSServerWebApp.get().getMetrics().getOpsMkdir()); + put("GETFILESTATUS", + () -> HttpFSServerWebApp.get().getMetrics().getOpsStat()); + } + }; + @Test @TestDir @TestJetty @@ -397,7 +416,8 @@ public void instrumentation() throws Exception { @TestHdfs public void testHdfsAccess() throws Exception { createHttpFSServer(false, false); - + long oldOpsListStatus = + metricsGetter.get("LISTSTATUS").call(); String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", @@ -408,6 +428,8 @@ public void testHdfsAccess() throws Exception { new InputStreamReader(conn.getInputStream())); reader.readLine(); reader.close(); + Assert.assertEquals(1 + oldOpsListStatus, + (long) metricsGetter.get("LISTSTATUS").call()); } @Test @@ -416,7 +438,8 @@ public void testHdfsAccess() throws Exception { @TestHdfs public void testMkdirs() throws Exception { createHttpFSServer(false, false); - + long oldMkdirOpsStat = + metricsGetter.get("MKDIRS").call(); String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( "/webhdfs/v1/tmp/sub-tmp?user.name={0}&op=MKDIRS", user)); @@ -424,8 +447,10 @@ public void testMkdirs() throws Exception { conn.setRequestMethod("PUT"); conn.connect(); Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); - getStatus("/tmp/sub-tmp", "LISTSTATUS"); + long opsStat = + metricsGetter.get("MKDIRS").call(); + Assert.assertEquals(1 + oldMkdirOpsStat, opsStat); } @Test @@ -434,7 +459,8 @@ public void testMkdirs() throws Exception { @TestHdfs public void testGlobFilter() throws Exception { createHttpFSServer(false, false); - + long oldOpsListStatus = + metricsGetter.get("LISTSTATUS").call(); FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); fs.mkdirs(new Path("/tmp")); fs.create(new Path("/tmp/foo.txt")).close(); @@ -449,6 +475,8 @@ public void testGlobFilter() throws Exception { new InputStreamReader(conn.getInputStream())); reader.readLine(); reader.close(); + Assert.assertEquals(1 + oldOpsListStatus, + (long) metricsGetter.get("LISTSTATUS").call()); } /** @@ -508,6 +536,9 @@ private void createWithHttp(String filename, String perms, */ private void createDirWithHttp(String dirname, String perms, String unmaskedPerms) throws Exception { + // get the createDirMetrics + long oldOpsMkdir = + metricsGetter.get("MKDIRS").call(); String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; // Remove leading / from filename if (dirname.charAt(0) == '/') { @@ -531,6 +562,8 @@ private void createDirWithHttp(String dirname, String perms, conn.setRequestMethod("PUT"); conn.connect(); Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(1 + oldOpsMkdir, + (long) metricsGetter.get("MKDIRS").call()); } /** @@ -544,6 +577,8 @@ private void createDirWithHttp(String dirname, String perms, */ private String getStatus(String filename, String command) throws Exception { + long oldOpsStat = + metricsGetter.getOrDefault(command, defaultEntryMetricGetter).call(); String user = 
HadoopUsersConfTestHelper.getHadoopUsers()[0]; // Remove leading / from filename if (filename.charAt(0) == '/') { @@ -559,7 +594,9 @@ private String getStatus(String filename, String command) BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); - + long opsStat = + metricsGetter.getOrDefault(command, defaultExitMetricGetter).call(); + Assert.assertEquals(oldOpsStat + 1L, opsStat); return reader.readLine(); } @@ -1565,7 +1602,7 @@ public void testNoRedirect() throws Exception { new InputStreamReader(conn.getInputStream())); String location = (String)json.get("Location"); Assert.assertTrue(location.contains(DataParam.NAME)); - Assert.assertTrue(location.contains(NoRedirectParam.NAME)); + Assert.assertFalse(location.contains(NoRedirectParam.NAME)); Assert.assertTrue(location.contains("CREATE")); Assert.assertTrue("Wrong location: " + location, location.startsWith(TestJettyHelper.getJettyURL().toString())); @@ -1834,4 +1871,50 @@ public void testStoragePolicySatisfier() throws Exception { assertTrue( xAttrs.containsKey(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY)); } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testNoRedirectWithData() throws Exception { + createHttpFSServer(false, false); + + final String path = "/file"; + final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + // file creation which should not redirect + URL url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format( + "/webhdfs/v1{0}?user.name={1}&op=CREATE&data=true&noredirect=true", + path, username)); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod(HttpMethod.PUT); + conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM); + conn.setDoOutput(true); + conn.connect(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + JSONObject json = (JSONObject) new JSONParser() + .parse(new InputStreamReader(conn.getInputStream())); + + // get the location to write + String location = (String) json.get("Location"); + Assert.assertTrue(location.contains(DataParam.NAME)); + Assert.assertTrue(location.contains("CREATE")); + url = new URL(location); + conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod(HttpMethod.PUT); + conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM); + conn.setDoOutput(true); + conn.connect(); + final String writeStr = "write some content"; + OutputStream os = conn.getOutputStream(); + os.write(writeStr.getBytes()); + os.close(); + // Verify that file got created + Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); + json = (JSONObject) new JSONParser() + .parse(new InputStreamReader(conn.getInputStream())); + location = (String) json.get("Location"); + Assert.assertEquals(TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path, + location); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml index bb95442605a79..d69ea3915ff08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-hdfs-native-client - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop HDFS Native Client Apache Hadoop HDFS Native Client jar diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt index 626c49bf192c6..db8b905cae3fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt @@ -151,7 +151,8 @@ add_subdirectory(main/native/libhdfs-examples) # Temporary fix to disable Libhdfs++ build on older systems that do not support thread_local include(CheckCXXSourceCompiles) unset (THREAD_LOCAL_SUPPORTED CACHE) -set (CMAKE_REQUIRED_DEFINITIONS "-std=c++11") +set (CMAKE_CXX_STANDARD 11) +set (CMAKE_CXX_STANDARD_REQUIRED ON) set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) check_cxx_source_compiles( "#include diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c index 9054287405632..846852bfd0e88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_mini_stress.c @@ -279,7 +279,7 @@ static int testHdfsMiniStressImpl(struct tlhThreadInfo *ti) EXPECT_NONNULL(ti->hdfs); // Error injection on, some failures are expected in the read path. // The expectation is that any memory stomps will cascade and cause - // the following test to fail. Ideally RPC errors would be seperated + // the following test to fail. Ideally RPC errors would be separated // from BlockReader errors (RPC is expected to recover from disconnects). doTestHdfsMiniStress(ti, 1); // No error injection diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c index 220208676e311..840e5b2da0caf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c @@ -3409,7 +3409,7 @@ tOffset hdfsGetUsed(hdfsFS fs) } fss = (jobject)jVal.l; jthr = invokeMethod(env, &jVal, INSTANCE, fss, JC_FS_STATUS, - HADOOP_FSSTATUS,"getUsed", "()J"); + "getUsed", "()J"); destroyLocalReference(env, fss); if (jthr) { errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c index a0f26c6cb6e71..1b6dafaba82ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c @@ -53,7 +53,7 @@ void hdfsThreadDestructor(void *v) char thr_name[MAXTHRID]; /* Detach the current thread from the JVM */ - if (env) { + if ((env != NULL) && (*env != NULL)) { ret = (*env)->GetJavaVM(env, &vm); if (ret != 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c index a6f48fd4a830e..f7abc89908b0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c +++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c @@ -46,10 +46,10 @@ static void detachCurrentThreadFromJvm() if (threadLocalStorageGet(&state) || !state) { return; } - if (!state->env) { + env = state->env; + if ((env == NULL) || (*env == NULL)) { return; } - env = state->env; ret = (*env)->GetJavaVM(env, &vm); if (ret) { fprintf(stderr, diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt index 411320ad771e7..2da5b6bbe52e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt @@ -51,7 +51,8 @@ include(CheckCXXSourceCompiles) # Check if thread_local is supported unset (THREAD_LOCAL_SUPPORTED CACHE) -set (CMAKE_REQUIRED_DEFINITIONS "-std=c++11") +set (CMAKE_CXX_STANDARD 11) +set (CMAKE_CXX_STANDARD_REQUIRED ON) set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) check_cxx_source_compiles( "#include @@ -147,12 +148,13 @@ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0") set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0") if(UNIX) -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -std=c++11 -g -fPIC -fno-strict-aliasing") +set (CMAKE_CXX_STANDARD 11) +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -g -fPIC -fno-strict-aliasing") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -fPIC -fno-strict-aliasing") endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + set(CMAKE_CXX_STANDARD 11) add_definitions(-DASIO_HAS_STD_ADDRESSOF -DASIO_HAS_STD_ARRAY -DASIO_HAS_STD_ATOMIC -DASIO_HAS_CSTDINT -DASIO_HAS_STD_SHARED_PTR -DASIO_HAS_STD_TYPE_TRAITS -DASIO_HAS_VARIADIC_TEMPLATES -DASIO_HAS_STD_FUNCTION -DASIO_HAS_STD_CHRONO -DASIO_HAS_STD_SYSTEM_ERROR) endif () @@ -263,6 +265,7 @@ if (HADOOP_BUILD) ${CMAKE_THREAD_LIBS_INIT} ) set_target_properties(hdfspp PROPERTIES SOVERSION ${LIBHDFSPP_VERSION}) + hadoop_dual_output_directory(hdfspp ${OUT_DIR}) else (HADOOP_BUILD) add_library(hdfspp_static STATIC ${EMPTY_FILE_CC} ${LIBHDFSPP_ALL_OBJECTS}) target_link_libraries(hdfspp_static diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h index d8574d15b3195..bc3d8b96d3f66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/uri.h @@ -103,7 +103,7 @@ class URI { std::string str(bool encoded_output=true) const; - // Get a string with each URI field printed on a seperate line + // Get a string with each URI field printed on a separate line std::string GetDebugString() const; private: // These are stored in encoded form diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc index 6b2468fd5dbdc..424bb6b7eb409 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc @@ -1402,7 +1402,7 @@ int 
hdfsGetBlockLocations(hdfsFS fs, const char *path, struct hdfsBlockLocations hdfsBlockLocations *locations = new struct hdfsBlockLocations(); (*locations_out) = locations; - bzero(locations, sizeof(*locations)); + explicit_bzero(locations, sizeof(*locations)); locations->fileLength = ppLocations->getFileLength(); locations->isLastBlockComplete = ppLocations->isLastBlockComplete(); locations->isUnderConstruction = ppLocations->isUnderConstruction(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc index 79771f0d7c57c..e69ddb26af4fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_ext_test.cc @@ -475,7 +475,7 @@ TEST_F(HdfsExtTest, TestReadStats) { hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0); EXPECT_NE(nullptr, file); void * buf = malloc(size); - bzero(buf, size); + explicit_bzero(buf, size); EXPECT_EQ(size, hdfsWrite(fs, file, buf, size)); free(buf); EXPECT_EQ(0, hdfsCloseFile(fs, file)); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h index aecced1a8b6e5..98edbdc1d6501 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfspp_mini_dfs.h @@ -92,7 +92,7 @@ class HdfsHandle { hdfsFile file = hdfsOpenFile(*this, path.c_str(), O_WRONLY, 0, 0, 0); EXPECT_NE(nullptr, file); void * buf = malloc(size); - bzero(buf, size); + explicit_bzero(buf, size); EXPECT_EQ(1024, hdfsWrite(*this, file, buf, size)); EXPECT_EQ(0, hdfsCloseFile(*this, file)); free(buf); diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c index 5471e5af8ccb2..22f38bee25bd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/third_party/uriparser2/uriparser2/uriparser/UriFile.c @@ -90,7 +90,7 @@ static URI_INLINE int URI_FUNC(FilenameToUriString)(const URI_CHAR * filename, if ((input[0] == _UT('\0')) || (fromUnix && input[0] == _UT('/')) || (!fromUnix && input[0] == _UT('\\'))) { - /* Copy text after last seperator */ + /* Copy text after last separator */ if (lastSep + 1 < input) { if (!fromUnix && absolute && (firstSegment == URI_TRUE)) { /* Quick hack to not convert "C:" to "C%3A" */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 2d30f67ef34a2..7c0bf933e0ff1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-hdfs-nfs - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop HDFS-NFS Apache Hadoop HDFS-NFS jar @@ -84,8 +84,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test 
- com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava compile diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index 27213953802f7..3b0327ad4a149 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -55,7 +55,7 @@ import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.ChannelHandlerContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * RPC program corresponding to mountd daemon. See {@link Mountd}. diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java index e0fb302992850..41add2212936a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java @@ -29,7 +29,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSClient; @@ -40,13 +40,13 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ShutdownHookManager; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Objects; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java index 8494493beed70..ff64ad5804609 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java @@ -26,7 +26,7 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Base; import org.apache.hadoop.util.StringUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Nfs server. Supports NFS v3 using {@link RpcProgramNfs3}. 
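The hunks above are part of a mechanical migration from the unshaded com.google.common packages to the shaded copies under org.apache.hadoop.thirdparty.com.google.common that ship in the hadoop-shaded-guava artifact; only the import prefix changes, the Guava API itself does not. A rough sketch of what a class looks like after the swap (the class name, key type, and cache sizing below are illustrative, not taken from the patch):

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
    import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
    import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader;
    import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache;

    public class ShadedGuavaCacheSketch {
      private final LoadingCache<String, Long> cache;

      public ShadedGuavaCacheSketch(int maxEntries) {
        Preconditions.checkArgument(maxEntries > 0, "maxEntries must be positive");
        // Identical builder calls as with unshaded Guava; only the package differs.
        this.cache = CacheBuilder.newBuilder()
            .maximumSize(maxEntries)
            .expireAfterAccess(10, TimeUnit.MINUTES)
            .build(new CacheLoader<String, Long>() {
              @Override
              public Long load(String key) {
                return (long) key.length(); // placeholder loader
              }
            });
      }

      @VisibleForTesting
      LoadingCache<String, Long> getCache() {
        return cache;
      }
    }

Because the relocated classes keep their simple names, call sites such as CacheBuilder.newBuilder() or Preconditions.checkArgument(...) compile unchanged once the import lines are rewritten, which is why the hunks in this part of the patch touch only import statements.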
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java index 764524a8ff677..3995fa5566bb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java @@ -19,7 +19,7 @@ import java.util.Comparator; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * OffsetRange is the range of read/write request. A single point (e.g.,[5,5]) diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index 6067a5df34786..528ead7a003b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -57,8 +57,8 @@ import org.apache.hadoop.util.Time; import org.jboss.netty.channel.Channel; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java index 5c915d26bf1e0..b8db83c89a3a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java @@ -30,9 +30,9 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * A cache saves OpenFileCtx objects for different users. 
Each cache entry is diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index cb46f449a1f00..d436eac598be0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -134,7 +134,7 @@ import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java index 98f3d6cfa2930..76859247bf2a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java @@ -29,8 +29,8 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.jboss.netty.channel.Channel; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * WriteCtx saves the context of one write request, such as request, channel, diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java index 35542391bdb4c..288937104084b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java @@ -45,7 +45,7 @@ import org.apache.hadoop.security.IdMappingServiceProvider; import org.jboss.netty.channel.Channel; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Manage the writes and responds asynchronously. 
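The HttpFSServerMetrics class added earlier in this patch exposes per-operation counters (opsMkdir, opsListing, opsStat, ...) plus bytesRead/bytesWritten, and the InputStreamEntity hunk shows the server bumping the read counter after streaming data. A minimal sketch of how a handler is expected to drive those counters; the wrapper class and handler methods here are hypothetical, only the HttpFSServerMetrics calls come from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.http.server.metrics.HttpFSServerMetrics;

    public class HttpFSMetricsUsageSketch {
      private final HttpFSServerMetrics metrics;

      public HttpFSMetricsUsageSketch(Configuration conf) {
        // Registers the source (and JVM metrics) with the default metrics system,
        // which is assumed to have been initialized by the server.
        this.metrics = HttpFSServerMetrics.create(conf, "httpfs-example");
      }

      public void onMkdirs() {
        // ... perform the MKDIRS operation against the proxied FileSystem ...
        metrics.incrOpsMkdir();
      }

      public void onOpen(long bytesStreamed) {
        metrics.incrOpsOpen();
        metrics.incrBytesRead(bytesStreamed);
      }
    }

The getOpsMkdir()/getOpsListing()/getOpsStat() getters are what the new TestHttpFSServer assertions read before and after each request to verify that the counters advance.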
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 777921746a66f..bc38a38870254 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-hdfs-rbf - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop HDFS-RBF Apache Hadoop HDFS-RBF jar @@ -114,6 +114,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> mockito-core test + + org.assertj + assertj-core + test + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java index 5f06f5918ea5f..64936e28a651e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java @@ -33,7 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import static org.apache.hadoop.util.Time.monotonicNow; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java index 2e54765d44224..f589602f50b5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java @@ -61,9 +61,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; /** * Expose the Namenode metrics as the Router was one. 
@@ -709,6 +709,11 @@ public int getNumEnteringMaintenanceDataNodes() { return 0; } + @Override + public int getNumInServiceLiveDataNodes() { + return 0; + } + @Override public int getVolumeFailuresTotal() { return 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java index 05398e77e24f5..d626c23c9dc40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java @@ -88,7 +88,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Implementation of the Router metrics collector. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java index 64bb10822f9f8..b4932219d39c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java @@ -32,7 +32,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt; import org.apache.hadoop.metrics2.lib.MutableRate; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Implementations of the JMX interface for the State Store metrics. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 96b560c649a97..797006ab1de4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -61,9 +61,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; /** * Mount table to map between global paths and remote locations. 
This allows the diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java index b09a883ebcb77..9beffe757d424 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Mount table resolver that supports multiple locations for each mount entry. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java index 883a126236e6f..88e20649506dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Order the destinations based on available space. This resolver uses a diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java index 455a3edb87d0b..3f8c354913c18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/HashResolver.java @@ -29,7 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Order the destinations based on consistent hashing. 
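The MountTableResolver hunk above pulls in the shaded Cache and CacheBuilder for its path-location cache, and the Router code that follows refreshes mount table state periodically. As a rough illustration only, with hypothetical names and sizes, this is the manually managed cache style involved: look up with getIfPresent, populate on a miss, and drop everything when the underlying mount table changes:

    import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
    import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;

    public class LocationCacheSketch {
      private final Cache<String, String> locationCache =
          CacheBuilder.newBuilder().maximumSize(10000).build();

      public String resolve(String path) {
        String cached = locationCache.getIfPresent(path);
        if (cached != null) {
          return cached;
        }
        String location = lookupInMountTable(path); // hypothetical lookup
        locationCache.put(path, location);
        return location;
      }

      /** Drop all cached resolutions, e.g. after a mount table refresh. */
      public void invalidate() {
        locationCache.invalidateAll();
      }

      private String lookupInMountTable(String path) {
        return path; // placeholder
      }
    }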
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java index 58a8ed278b3dc..3da655e35d094 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java @@ -42,8 +42,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.net.HostAndPort; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java index 13643e5e9f754..d21eef545b3b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RandomResolver.java @@ -25,7 +25,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; /** * Order the destinations randomly. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 74bbbb572fd27..9ec3b54ed50b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -32,7 +32,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index b84848089a319..52e7cebd26017 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -31,7 +31,7 @@ import javax.net.SocketFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.conf.Configuration; @@ -47,7 +47,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.RefreshUserMappingsProtocol; @@ -379,7 +379,7 @@ protected static ConnectionContext newConnection(Configuration conf, throw new IllegalStateException(msg); } ProtoImpl classes = PROTO_MAP.get(proto); - RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine2.class); final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf, HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java index 7cb343b1d53e8..8b537f9d94c11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPoolId.java @@ -23,7 +23,7 @@ import java.util.Collections; import java.util.List; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java index e3ecd266ab3df..0246ea98c29f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherService.java @@ -34,17 +34,19 @@ import org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils; import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; 
+import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * This service is invoked from {@link MountTableStore} when there is change in @@ -170,7 +172,12 @@ public RouterClient load(String adminAddress) throws IOException { @VisibleForTesting protected RouterClient createRouterClient(InetSocketAddress routerSocket, Configuration config) throws IOException { - return new RouterClient(routerSocket, config); + return SecurityUtil.doAsLoginUser(() -> { + if (UserGroupInformation.isSecurityEnabled()) { + UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); + } + return new RouterClient(routerSocket, config); + }); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java index c9967a20736e6..a077c4b3f45a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,10 +63,16 @@ public MountTableRefresherThread(MountTableManager manager, @Override public void run() { try { - RefreshMountTableEntriesResponse refreshMountTableEntries = - manager.refreshMountTableEntries( - RefreshMountTableEntriesRequest.newInstance()); - success = refreshMountTableEntries.getResult(); + SecurityUtil.doAsLoginUser(() -> { + if (UserGroupInformation.isSecurityEnabled()) { + UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); + } + RefreshMountTableEntriesResponse refreshMountTableEntries = manager + .refreshMountTableEntries( + RefreshMountTableEntriesRequest.newInstance()); + success = refreshMountTableEntries.getResult(); + return true; + }); } catch (IOException e) { LOG.error("Failed to refresh mount table entries cache at router {}", adminAddress, e); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java index 5e1222247286c..b690b8685c0a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Service to periodically execute a runnable. 
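The MountTableRefresherService and MountTableRefresherThread changes above route their admin-RPC work through SecurityUtil.doAsLoginUser and refresh the Kerberos TGT from the keytab when security is enabled. A compact sketch of that pattern in isolation; the method name and return value are placeholders, and only the doAsLoginUser/checkTGTAndReloginFromKeytab calls are taken from the patch:

    import java.io.IOException;

    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    /** Illustrative only: run a privileged call as the login user, re-logging in from the keytab first. */
    public final class LoginUserCall {
      private LoginUserCall() {
      }

      static String callAsLoginUser() throws IOException {
        return SecurityUtil.doAsLoginUser(() -> {
          if (UserGroupInformation.isSecurityEnabled()) {
            // Renew the TGT from the keytab if it is missing or about to expire.
            UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
          }
          // In the patch this is where the RouterClient is built or the mount table refreshed.
          return "ok";
        });
      }
    }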
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java index 2919ddc4c506d..9d84559212428 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java @@ -42,8 +42,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ListMultimap; /** * Module that implements the quota relevant RPC calls diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 64fdabe43b18c..d6e5a1cfe9626 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -56,7 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Router that provides a unified view of multiple federated HDFS clusters. It diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 5fd7c79f88f1a..159b1033e5825 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -29,7 +29,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; @@ -75,7 +75,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RefreshRegistry; @@ -90,7 +90,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; /** @@ -136,7 +136,7 @@ public RouterAdminServer(Configuration conf, Router router) RBFConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT); RPC.setProtocolEngine(this.conf, RouterAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); 
RouterAdminProtocolServerSideTranslatorPB routerAdminProtocolTranslator = new RouterAdminProtocolServerSideTranslatorPB(this); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java index 0641c0b82afb6..ee29b7dd2b513 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolTranslatorPB; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -47,7 +47,7 @@ private static RouterAdminProtocolTranslatorPB createRouterProxy( throws IOException { RPC.setProtocolEngine( - conf, RouterAdminProtocolPB.class, ProtobufRpcEngine.class); + conf, RouterAdminProtocolPB.class, ProtobufRpcEngine2.class); AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false); final long version = RPC.getProtocolVersion(RouterAdminProtocolPB.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 6a28c4f505889..baee979dfe79f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -96,7 +96,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.FileNotFoundException; import java.io.IOException; @@ -1768,10 +1768,11 @@ public void satisfyStoragePolicy(String path) throws IOException { } @Override - public HAServiceProtocol.HAServiceState getHAServiceState() - throws IOException { - rpcServer.checkOperation(NameNode.OperationCategory.READ, false); - return null; + public HAServiceProtocol.HAServiceState getHAServiceState() { + if (rpcServer.isSafeMode()) { + return HAServiceProtocol.HAServiceState.STANDBY; + } + return HAServiceProtocol.HAServiceState.ACTIVE; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsck.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsck.java index 898099c908a03..9a90677644f05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterFsck.java @@ -33,6 +33,7 @@ import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; @@ -48,6 +49,7 @@ * Wrapper for the Router to offer the Namenode FSCK. */ @InterfaceAudience.Private +@InterfaceStability.Unstable public class RouterFsck { public static final Logger LOG = @@ -69,6 +71,10 @@ public RouterFsck(Router router, Map pmap, public void fsck() { final long startTime = Time.monotonicNow(); try { + String warnMsg = "Now FSCK to DFSRouter is unstable feature. " + + "There may be incompatible changes between releases."; + LOG.warn(warnMsg); + out.println(warnMsg); String msg = "Federated FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " at " + new Date(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java index c497d85335922..37407c2a3b8aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.federation.store.CachedRecordStore; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index dae4b9356436c..ef255d99773db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -29,6 +29,7 @@ import java.lang.reflect.Method; import java.net.ConnectException; import java.net.InetSocketAddress; +import java.net.SocketException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -74,8 +75,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * A client proxy for Router to NN communication using the NN ClientProtocol. @@ -582,9 +583,9 @@ private Object invoke(String nsId, int retryCount, final Method method, * @return If the exception comes from an unavailable subcluster. 
*/ public static boolean isUnavailableException(IOException ioe) { - if (ioe instanceof ConnectException || - ioe instanceof ConnectTimeoutException || + if (ioe instanceof ConnectTimeoutException || ioe instanceof EOFException || + ioe instanceof SocketException || ioe instanceof StandbyException) { return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 345ec705f2cd8..559a7a2209499 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -133,7 +133,7 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RemoteException; @@ -156,7 +156,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; /** @@ -256,7 +256,7 @@ public RouterRpcServer(Configuration configuration, Router router, readerQueueSize); RPC.setProtocolEngine(this.conf, ClientNamenodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ClientNamenodeProtocolServerSideTranslatorPB clientProtocolServerTranslator = @@ -526,8 +526,7 @@ void checkOperation(OperationCategory op) * client requests. */ private void checkSafeMode() throws StandbyException { - RouterSafemodeService safemodeService = router.getSafemodeService(); - if (safemodeService != null && safemodeService.isInSafeMode()) { + if (isSafeMode()) { // Throw standby exception, router is not available if (rpcMonitor != null) { rpcMonitor.routerFailureSafemode(); @@ -538,6 +537,16 @@ private void checkSafeMode() throws StandbyException { } } + /** + * Return true if the Router is in safe mode. + * + * @return true if the Router is in safe mode. + */ + boolean isSafeMode() { + RouterSafemodeService safemodeService = router.getSafemodeService(); + return (safemodeService != null && safemodeService.isInSafeMode()); + } + /** * Get the name of the method that is calling this function. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index 9f0d06d7695cd..061a556c2ea9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -115,20 +115,18 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods { private static final Logger LOG = LoggerFactory.getLogger(RouterWebHdfsMethods.class); - private static final ThreadLocal REMOTE_ADDRESS = - new ThreadLocal(); - private @Context HttpServletRequest request; private String method; private String query; private String reqPath; + private String remoteAddr; public RouterWebHdfsMethods(@Context HttpServletRequest request) { super(request); this.method = request.getMethod(); this.query = request.getQueryString(); this.reqPath = request.getServletPath(); - REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request)); + this.remoteAddr = JspHelper.getRemoteAddr(request); } @Override @@ -139,7 +137,7 @@ protected void init(final UserGroupInformation ugi, final Param... parameters) { super.init(ugi, delegation, username, doAsUser, path, op, parameters); - REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request)); + remoteAddr = JspHelper.getRemoteAddr(request); } @Override @@ -153,12 +151,12 @@ protected ClientProtocol getRpcClientProtocol() throws IOException { } private void reset() { - REMOTE_ADDRESS.set(null); + remoteAddr = null; } @Override protected String getRemoteAddr() { - return REMOTE_ADDRESS.get(); + return remoteAddr; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java index 8e7a34381cff7..7b0787f0f1613 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/RouterSecurityManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router.security; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java index 66c288238eb76..95a38588324cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java @@ -53,7 +53,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A service to initialize a diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java index 15fc9c1ae9f6c..8352bca12e9aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java @@ -44,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * {@link StateStoreDriver} implementation based on files. In this approach, we diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java index 60dbcdc10ef44..cedc784e39f15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; /** * StateStoreDriver implementation based on a local file. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java index 86721eaa476b6..6b39e20bd7ee7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java @@ -21,7 +21,7 @@ import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Abstract base of a data record in the StateStore. 
All StateStore records are diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java index d1351a340c3cf..5d7d5c2966997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java @@ -430,6 +430,8 @@ public int hashCode() { .append(this.isReadOnly()) .append(this.getDestOrder()) .append(this.isFaultTolerant()) + .append(this.getQuota().getQuota()) + .append(this.getQuota().getSpaceQuota()) .toHashCode(); } @@ -443,6 +445,9 @@ public boolean equals(Object obj) { .append(this.isReadOnly(), other.isReadOnly()) .append(this.getDestOrder(), other.getDestOrder()) .append(this.isFaultTolerant(), other.isFaultTolerant()) + .append(this.getQuota().getQuota(), other.getQuota().getQuota()) + .append(this.getQuota().getSpaceQuota(), + other.getQuota().getSpaceQuota()) .isEquals(); } return false; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java index fc3e49ff9dbb0..ab7bfb16cb822 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java @@ -33,8 +33,8 @@ * or remove nodes, it minimizes the item migration. */ public class ConsistentHashRing { - private static final String SEPERATOR = "/"; - private static final String VIRTUAL_NODE_FORMAT = "%s" + SEPERATOR + "%d"; + private static final String SEPARATOR = "/"; + private static final String VIRTUAL_NODE_FORMAT = "%s" + SEPARATOR + "%d"; /** Hash ring. */ private SortedMap ring = new TreeMap(); @@ -119,7 +119,7 @@ public String getLocation(String item) { hash = tailMap.isEmpty() ? 
ring.firstKey() : tailMap.firstKey(); } String virtualNode = ring.get(hash); - int index = virtualNode.lastIndexOf(SEPERATOR); + int index = virtualNode.lastIndexOf(SEPARATOR); if (index >= 0) { return virtualNode.substring(0, index); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index 5ea33237b672c..7422989d6aad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshResponse; import org.apache.hadoop.ipc.RemoteException; @@ -1222,7 +1222,7 @@ public int genericRefresh(String[] argv, int i) throws IOException { InetSocketAddress address = NetUtils.createSocketAddr(hostport); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)RPC.getProxy( xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js index d2f372920a64d..448e2790e30ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js @@ -108,7 +108,8 @@ */ function view_perm_details(e, filename, abs_path, perms) { $('.explorer-perm-links').popover('destroy'); - e.popover({html: true, content: $('#explorer-popover-perm-info').html(), trigger: 'focus'}) + setTimeout(function() { + e.popover({html: true,sanitize: false, content: $('#explorer-popover-perm-info').html(), trigger: 'focus'}) .on('shown.bs.popover', function(e) { var popover = $(this), parent = popover.parent(); //Convert octal to binary permissions @@ -122,6 +123,7 @@ }); }) .popover('show'); + }, 100); } // Use WebHDFS to set permissions on an absolute path diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java index dd2bbff7d8da2..b1e4a05500fda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java @@ -71,4 +71,9 @@ public void testRmRootRecursive() { public void testRmEmptyRootDirRecursive() { // It doesn't apply because we still have the mount points here } + + @Override + public void testSimpleRootListing() { + // 
It doesn't apply because DFSRouter dosn't support LISTSTATUS_BATCH. + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java index 31c1bffe1efd5..fd6dc7fc9b0a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java @@ -92,7 +92,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Helper utilities for testing HDFS Federation. @@ -474,7 +474,10 @@ public static RouterClient getAdminClient( /** * Add a mount table entry in some name services and wait until it is - * available. + * available. If there are multiple routers, + * {@link #createMountTableEntry(List, String, DestinationOrder, Collection)} + * should be used instead because the method does not refresh + * the mount tables of the other routers. * @param router Router to change. * @param mountPoint Name of the mount point. * @param order Order of the mount table entry. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java index 699ea92abb598..f908065384193 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockNamenode.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceStatus; @@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -88,7 +90,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.RemoteException; @@ -172,7 +174,7 @@ public HAServiceStatus answer(InvocationOnMock invocation) */ private void setupRPCServer(final Configuration conf) throws IOException { RPC.setProtocolEngine( - conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class); + conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine2.class); ClientNamenodeProtocolServerSideTranslatorPB clientNNProtoXlator = new ClientNamenodeProtocolServerSideTranslatorPB(mockNn); @@ -537,7 +539,10 @@ private static LocatedBlock getMockLocatedBlock(final String nsId) { DatanodeID nodeId = new DatanodeID("localhost", 
"localhost", "dn0", 1111, 1112, 1113, 1114); DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); - when(lb.getLocations()).thenReturn(new DatanodeInfo[] {dnInfo}); + DatanodeInfoWithStorage datanodeInfoWithStorage = + new DatanodeInfoWithStorage(dnInfo, "storageID", StorageType.DEFAULT); + when(lb.getLocations()) + .thenReturn(new DatanodeInfoWithStorage[] {datanodeInfoWithStorage}); ExtendedBlock eb = mock(ExtendedBlock.class); when(eb.getBlockPoolId()).thenReturn(nsId); when(lb.getBlock()).thenReturn(eb); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java index 97cf94dc85f62..44c0fc7ed3095 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java @@ -274,9 +274,18 @@ public void testNamenodeHeartBeatEnableDefault() throws IOException { */ private void checkNamenodeHeartBeatEnableDefault(boolean enable) throws IOException { - final Router router = new Router(); - try { + try (Router router = new Router()) { + // Use default config Configuration config = new HdfsConfiguration(); + // bind to any available port + config.set(RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY, "0.0.0.0"); + config.set(RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY, "127.0.0.1:0"); + config.set(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, "127.0.0.1:0"); + config.set(RBFConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY, "0.0.0.0"); + config.set(RBFConfigKeys.DFS_ROUTER_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + config.set(RBFConfigKeys.DFS_ROUTER_HTTPS_ADDRESS_KEY, "127.0.0.1:0"); + config.set(RBFConfigKeys.DFS_ROUTER_HTTP_BIND_HOST_KEY, "0.0.0.0"); + config.setBoolean(RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE, enable); router.init(config); if (enable) { @@ -284,8 +293,6 @@ private void checkNamenodeHeartBeatEnableDefault(boolean enable) } else { assertNull(router.getNamenodeHeartbeatServices()); } - } finally { - router.close(); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index 303b3f6433a62..837607cea52d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -64,7 +64,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Tests Router admin commands. 
@@ -932,6 +932,10 @@ public void testSafeModeStatus() throws Exception { // ensure the Router become RUNNING state waitState(RouterServiceState.RUNNING); assertFalse(routerContext.getRouter().getSafemodeService().isInSafeMode()); + final RouterClientProtocol clientProtocol = + routerContext.getRouter().getRpcServer().getClientProtocolModule(); + assertEquals(HAServiceState.ACTIVE, clientProtocol.getHAServiceState()); + assertEquals(0, ToolRunner.run(admin, new String[] {"-safemode", "enter" })); @@ -944,6 +948,7 @@ public void testSafeModeStatus() throws Exception { // verify state using RBFMetrics assertEquals(RouterServiceState.SAFEMODE.toString(), jsonString); assertTrue(routerContext.getRouter().getSafemodeService().isInSafeMode()); + assertEquals(HAServiceState.STANDBY, clientProtocol.getHAServiceState()); System.setOut(new PrintStream(out)); assertEquals(0, @@ -955,6 +960,7 @@ public void testSafeModeStatus() throws Exception { // verify state assertEquals(RouterServiceState.RUNNING.toString(), jsonString); assertFalse(routerContext.getRouter().getSafemodeService().isInSafeMode()); + assertEquals(HAServiceState.ACTIVE, clientProtocol.getHAServiceState()); out.reset(); assertEquals(0, ToolRunner.run(admin, new String[] {"-safemode", "get" })); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java index 5e0e11752341d..bf571e2ff790c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java @@ -248,6 +248,7 @@ private void testWriteWithFailedSubcluster(final DestinationOrder order) LOG.info("Setup {} with order {}", mountPoint, order); createMountTableEntry( getRandomRouter(), mountPoint, order, namenodes.keySet()); + refreshRoutersCaches(routers); LOG.info("Write in {} should succeed writing in ns0 and fail for ns1", mountPath); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java index 36a00e507633e..c2577e67a06fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java @@ -41,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Test suite covering RouterPolicyProvider. 
We expect that it contains a diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java index 7eb9badaafbcd..5326d48be2d31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java @@ -74,7 +74,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Tests quota behaviors in Router-based Federation. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java index cea5212965cec..a16f9d10c85fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java @@ -54,7 +54,7 @@ import org.junit.Test; import org.junit.rules.Timeout; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Test retry behavior of the Router RPC Client. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java index d00b93c43062c..bcab7bb0ba706 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java @@ -25,6 +25,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -34,6 +35,7 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Options.Rename; @@ -56,7 +58,9 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse; import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.hdfs.tools.federation.RouterAdmin; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.ToolRunner; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -509,6 +513,43 @@ public void testRenameMultipleDestDirectories() throws Exception { verifyRenameOnMultiDestDirectories(DestinationOrder.SPACE, true); } + @Test + public void testClearQuota() throws Exception { + long nsQuota = 5; + long ssQuota = 100; + Path path = new Path("/router_test"); + nnFs0.mkdirs(path); + 
nnFs1.mkdirs(path); + MountTable addEntry = MountTable.newInstance("/router_test", + Collections.singletonMap("ns0", "/router_test")); + addEntry.setQuota(new RouterQuotaUsage.Builder().build()); + assertTrue(addMountTable(addEntry)); + RouterQuotaUpdateService updateService = + routerContext.getRouter().getQuotaCacheUpdateService(); + updateService.periodicInvoke(); + + //set quota and validate the quota + RouterAdmin admin = getRouterAdmin(); + String[] argv = new String[] {"-setQuota", path.toString(), "-nsQuota", + String.valueOf(nsQuota), "-ssQuota", String.valueOf(ssQuota)}; + assertEquals(0, ToolRunner.run(admin, argv)); + updateService.periodicInvoke(); + resolver.loadCache(true); + ContentSummary cs = routerFs.getContentSummary(path); + assertEquals(nsQuota, cs.getQuota()); + assertEquals(ssQuota, cs.getSpaceQuota()); + + //clear quota and validate the quota + argv = new String[] {"-clrQuota", path.toString()}; + assertEquals(0, ToolRunner.run(admin, argv)); + updateService.periodicInvoke(); + resolver.loadCache(true); + //quota should be cleared + ContentSummary cs1 = routerFs.getContentSummary(path); + assertEquals(-1, cs1.getQuota()); + assertEquals(-1, cs1.getSpaceQuota()); + } + /** * Test to verify rename operation on directories in case of multiple * destinations. @@ -690,4 +731,12 @@ private static FileSystem getFileSystem(final String nsId) { return null; } + private RouterAdmin getRouterAdmin() { + Router router = routerContext.getRouter(); + Configuration configuration = routerContext.getConf(); + InetSocketAddress routerSocket = router.getAdminServerAddress(); + configuration.setSocketAddr(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY, + routerSocket); + return new RouterAdmin(configuration); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index fcf742d9dad97..bfc712f057afd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.addDirectory; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.countContents; import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile; @@ -128,8 +129,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; -import com.google.common.collect.Maps; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * The the RPC interface of the {@link Router} implemented by @@ -192,8 +193,14 @@ public int compare( @BeforeClass public static void globalSetUp() throws Exception { + Configuration namenodeConf = new Configuration(); + // It's very easy to become overloaded for some specific dn in this small + // cluster, which will cause the EC file block allocation failure. To avoid + // this issue, we disable considerLoad option. 
+ namenodeConf.setBoolean(DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS); cluster.setNumDatanodesPerNameservice(NUM_DNS); + cluster.addNamenodeOverrides(namenodeConf); cluster.setIndependentDNs(); // Start NNs and DNs and wait until ready diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/hdfs.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/hdfs.xml index 261d4ba136508..ae280a8e450c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/hdfs.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/hdfs.xml @@ -111,4 +111,19 @@ true + + fs.contract.supports-hflush + true + + + + fs.contract.supports-hsync + true + + + + fs.contract.metadata_updated_on_hsync + false + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml index 0cb6dd8a6d0f1..45aaa2264250c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml @@ -28,4 +28,19 @@ true + + fs.contract.supports-hflush + false + + + + fs.contract.supports-hsync + false + + + + fs.contract.metadata_updated_on_hsync + false + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.2.2.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.2.2.xml new file mode 100644 index 0000000000000..811d305856a5b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.2.2.xml @@ -0,0 +1,674 @@ + + + + + + + + + + + A distributed implementation of {@link +org.apache.hadoop.fs.FileSystem}. This is loosely modelled after +Google's GFS.

[The remainder of the new 674-line jdiff XML report is not reproduced here. Apart from markup it carries only the package javadoc, noting that unlike GFS an HDFS file has strictly one writer at any one time, bytes are always appended to the end of the writer's stream with no record appends or mutations, and the byte stream is stored in the order written; and the audit-logger javadoc, noting that the callback must return as quickly as possible because it is called in a critical section of the NameNode and receives the authorization result, user name, remote address, command, source and destination paths, and file status.]
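Before the jdiff report above, the patch also declares hflush/hsync support in the RBF HDFS and WebHDFS contract definitions. A small illustrative probe of those capabilities at runtime; the path and the filesystem resolved from fs.defaultFS are placeholders, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative only: check hflush support on whatever filesystem fs.defaultFS points at. */
    public final class HflushProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        try (FSDataOutputStream out = fs.create(new Path("/tmp/hflush-probe"))) {
          out.writeBytes("probe");
          // FSDataOutputStream exposes StreamCapabilities; per the contract files above,
          // "hflush" is supported through the Router HDFS path but not through WebHDFS.
          if (out.hasCapability("hflush")) {
            out.hflush();
          }
        }
      }
    }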
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index be2164fc077eb..3d9ef5232931a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-hdfs - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop HDFS Apache Hadoop HDFS jar @@ -68,8 +68,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava compile @@ -219,6 +219,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> assertj-core test + + org.lz4 + lz4-java + test + @@ -349,6 +354,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> replace-sources false + + **/DFSUtil.java + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b2f8ad2a5a41b..f7da4cb97c277 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -243,6 +243,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.namenode.read.considerLoad"; public static final boolean DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT = false; + public static final String DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY = + "dfs.namenode.read.considerStorageType"; + public static final boolean DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_DEFAULT = + false; public static final String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR = "dfs.namenode.redundancy.considerLoad.factor"; public static final double @@ -261,8 +265,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { = "dfs.namenode.file.close.num-committed-allowed"; public static final int DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_DEFAULT = 0; - public static final String DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min"; - public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1; public static final String DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY = "dfs.namenode.safemode.replication.min"; @@ -660,6 +662,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT = false; public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async"; public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false; + public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY = "dfs.namenode.audit.log.async.blocking"; + public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_DEFAULT = true; + public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY = "dfs.namenode.audit.log.async.buffer.size"; + public static final int DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT = 128; public static final String DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = "dfs.namenode.audit.log.debug.cmdlist"; public static final String DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY = "dfs.namenode.metrics.logger.period.seconds"; @@ -842,6 +848,18 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 24; // 3 weeks. 
public static final String DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = "dfs.block.scanner.volume.bytes.per.second"; public static final long DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L; + /** + * The amount of time in milliseconds that the BlockScanner times out waiting + * for the VolumeScanner thread to join during a shutdown call. + */ + public static final String DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY = + "dfs.block.scanner.volume.join.timeout.ms"; + public static final long DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT = + TimeUnit.SECONDS.toMillis(5); + public static final String DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED = + "dfs.block.scanner.skip.recent.accessed"; + public static final boolean DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT = + false; public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed"; public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true; public static final String DFS_HEARTBEAT_INTERVAL_KEY = "dfs.heartbeat.interval"; @@ -1160,6 +1178,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.ha.nn.not-become-active-in-safemode"; public static final boolean DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT = false; + public static final String DFS_HA_ALLOW_STALE_READ_KEY = + "dfs.ha.allow.stale.reads"; + public static final boolean DFS_HA_ALLOW_STALE_READ_DEFAULT = false; // Security-related configs public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer"; @@ -1353,7 +1374,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.datanode.parallel.volumes.load.threads.num"; public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY = "dfs.datanode.block.id.layout.upgrade.threads"; - public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12; + public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 6; public static final String DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY = "dfs.namenode.inotify.max.events.per.rpc"; @@ -1460,6 +1481,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.namenode.state.context.enabled"; public static final boolean DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT = false; + /** + * whether to protect the subdirectories of directories which + * set on fs.protected.directories. + */ + public static final String DFS_PROTECTED_SUBDIRECTORIES_ENABLE = + "dfs.protected.subdirectories.enable"; + // Default value for DFS_PROTECTED_SUBDIRECTORIES_ENABLE. 
+ public static final boolean DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT = + false; + // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry @Deprecated public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index c5ba8b96cf321..ad6125785d533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -72,6 +72,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.INodesInPath; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.security.AccessControlException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -92,7 +93,7 @@ import org.apache.hadoop.hdfs.web.AuthFilterInitializer; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AuthenticationFilterInitializer; @@ -103,11 +104,11 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ToolRunner; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.thirdparty.protobuf.BlockingService; @InterfaceAudience.Private @@ -1295,6 +1296,27 @@ static URI trimUri(URI uri) { */ public static void addPBProtocol(Configuration conf, Class protocol, BlockingService service, RPC.Server server) throws IOException { + RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class); + server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service); + } + + /** + * Add protobuf based protocol to the {@link RPC.Server}. + * This engine uses Protobuf 2.5.0. Recommended to upgrade to + * Protobuf 3.x from hadoop-thirdparty and use + * {@link DFSUtil#addPBProtocol(Configuration, Class, BlockingService, + * RPC.Server)}. 
+ * @param conf configuration + * @param protocol Protocol interface + * @param service service that implements the protocol + * @param server RPC server to which the protocol & implementation is + * added to + * @throws IOException + */ + @Deprecated + public static void addPBProtocol(Configuration conf, Class protocol, + com.google.protobuf.BlockingService service, RPC.Server server) + throws IOException { RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class); server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service); } @@ -1787,6 +1809,18 @@ public static void checkProtectedDescendants( + descendant); } } + + if (fsd.isProtectedSubDirectoriesEnable()) { + while (!src.isEmpty()) { + int index = src.lastIndexOf(Path.SEPARATOR_CHAR); + src = src.substring(0, index); + if (protectedDirs.contains(src)) { + throw new AccessControlException( + "Cannot delete/rename subdirectory under protected subdirectory " + + src); + } + } + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index aebc28aa793e3..53d3b4b2936cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY; @@ -54,9 +56,9 @@ import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.LoggerFactory; @InterfaceAudience.Private @@ -220,11 +222,12 @@ public static List getConfForOtherNodes( * @return true if the NN should allow read operations while in standby mode. 
*/ public static boolean shouldAllowStandbyReads(Configuration conf) { - return conf.getBoolean("dfs.ha.allow.stale.reads", false); + return conf.getBoolean(DFS_HA_ALLOW_STALE_READ_KEY, + DFS_HA_ALLOW_STALE_READ_DEFAULT); } public static void setAllowStandbyReads(Configuration conf, boolean val) { - conf.setBoolean("dfs.ha.allow.stale.reads", val); + conf.setBoolean(DFS_HA_ALLOW_STALE_READ_KEY, val); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index 3063083db8840..2a56ef3e1868b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -48,7 +48,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.AlignmentContext; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProxyCombiner; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; @@ -305,7 +305,7 @@ private static BalancerProtocols createNNProxyWithBalancerProtocol( private static T createNameNodeProxy(InetSocketAddress address, Configuration conf, UserGroupInformation ugi, Class xface, int rpcTimeout, AlignmentContext alignmentContext) throws IOException { - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); return RPC.getProtocolProxy(xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout, null, null, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java index 9082b910eb45c..02d2c43191d6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.net; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java index 7a9afabc1edb9..72c89f57872b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.net; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; 
import org.apache.hadoop.net.InnerNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java index 6d2c0ac3c0391..d9baa8ff45f74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.Replica; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java index 2c0d26363b3cf..aef009a03a1dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.util.Date; @@ -28,7 +28,7 @@ import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.IntrusiveCollection.Element; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Namenode class that tracks state related to a cached path. 
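The import churn in these hunks follows one pattern: Guava classes are now resolved through the shaded org.apache.hadoop.thirdparty package from hadoop-thirdparty (matching the hadoop-shaded-guava dependency change in the pom.xml hunk above) instead of com.google.common. A minimal sketch of the resulting style, with an invented class name, assuming the shaded artifact is on the compile classpath:

    // Illustration only (class name invented): Guava types taken from the
    // hadoop-thirdparty shaded package, as in the hunks above.
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
    import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;

    import java.util.List;

    public final class ShadedGuavaExample {
      private ShadedGuavaExample() {
      }

      public static List<String> copyNonEmpty(List<String> names) {
        Preconditions.checkArgument(!names.isEmpty(), "names must not be empty");
        return Lists.newArrayList(names);
      }
    }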
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java index fc1717f26a5f9..edc184025cefa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java @@ -23,10 +23,10 @@ import org.apache.hadoop.classification.InterfaceAudience; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * LayoutFlags represent features which the FSImage and edit logs can either diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java index ae17761c204ab..0e2dc71930615 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.DataOutputStream; @@ -59,8 +59,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Negotiates SASL for DataTransferProtocol on behalf of a server. 
There are diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 9bd82485a508d..e0afe006a2f9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -688,7 +688,8 @@ public Rename2ResponseProto rename2(RpcController controller, ArrayList optionList = new ArrayList(); if(req.getOverwriteDest()) { optionList.add(Rename.OVERWRITE); - } else if(req.hasMoveToTrash() && req.getMoveToTrash()) { + } + if (req.hasMoveToTrash() && req.getMoveToTrash()) { optionList.add(Rename.TO_TRASH); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java index 050073fb952ed..220e9e2835625 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeLifelineProtocolClientSideTranslatorPB.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; @@ -57,7 +57,7 @@ public class DatanodeLifelineProtocolClientSideTranslatorPB implements public DatanodeLifelineProtocolClientSideTranslatorPB( InetSocketAddress nameNodeAddr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, DatanodeLifelineProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); rpcProxy = createNamenode(nameNodeAddr, conf, ugi); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index 6ab98e5880c31..add19e9e102ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -62,14 +62,14 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.annotations.VisibleForTesting; 
+import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.RpcController; import org.apache.hadoop.thirdparty.protobuf.ServiceException; @@ -99,7 +99,7 @@ public DatanodeProtocolClientSideTranslatorPB(DatanodeProtocolPB rpcProxy) { public DatanodeProtocolClientSideTranslatorPB(InetSocketAddress nameNodeAddr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, DatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); rpcProxy = createNamenode(nameNodeAddr, conf, ugi); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index 5ecbcb7bcd590..9244b9fef8571 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -61,7 +61,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.RpcController; import org.apache.hadoop.thirdparty.protobuf.ServiceException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java index 64d57562a1811..031b0e4512ad3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; @@ -62,7 +62,7 @@ public InterDatanodeProtocolTranslatorPB(InetSocketAddress addr, int socketTimeout) throws IOException { RPC.setProtocolEngine(conf, InterDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); rpcProxy = RPC.getProxy(InterDatanodeProtocolPB.class, RPC.getProtocolVersion(InterDatanodeProtocolPB.class), addr, ugi, conf, factory, socketTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java index 5eead67fa7b5d..bf72723071d26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import 
org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; /** * Interface for a remote log which is only communicated with asynchronously. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index f024b0e8c267d..684e7dd69fd40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -34,12 +34,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; /** * Wrapper around a set of Loggers, taking care of fanning out diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 3b3f89e7d9b5a..94e9456971296 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -52,21 +52,21 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StopWatch; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.net.InetAddresses; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.common.util.concurrent.UncaughtExceptionHandlers; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.UncaughtExceptionHandlers; /** * Channel to a remote JournalNode using Hadoop IPC. @@ -235,13 +235,13 @@ protected QJournalProtocol createProxy() throws IOException { true); RPC.setProtocolEngine(confCopy, - QJournalProtocolPB.class, ProtobufRpcEngine.class); + QJournalProtocolPB.class, ProtobufRpcEngine2.class); return SecurityUtil.doAsLoginUser( new PrivilegedExceptionAction() { @Override public QJournalProtocol run() throws IOException { RPC.setProtocolEngine(confCopy, - QJournalProtocolPB.class, ProtobufRpcEngine.class); + QJournalProtocolPB.class, ProtobufRpcEngine2.class); QJournalProtocolPB pbproxy = RPC.getProxy( QJournalProtocolPB.class, RPC.getProtocolVersion(QJournalProtocolPB.class), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java index fde6b99817674..6eef8ffd38620 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java @@ -29,7 +29,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableQuantiles; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * The metrics for a journal from the writer's perspective. 
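Alongside the Guava relocation, these hunks switch RPC setup from ProtobufRpcEngine to ProtobufRpcEngine2; per the deprecated DFSUtil#addPBProtocol overload above, the old engine is retained only for protocols still generated against com.google.protobuf 2.5. A minimal sketch (class name invented; not part of the patch) of the registration call the translators and IPCLoggerChannel now make before building a proxy or server:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.RPC;

    // Illustration only (class name invented): registers the protobuf-3-based
    // engine for a protocol before a proxy or RPC server is created.
    public final class RpcEngineSetup {
      private RpcEngineSetup() {
      }

      public static Configuration useProtobufEngine2(Configuration conf) {
        RPC.setProtocolEngine(conf, QJournalProtocolPB.class,
            ProtobufRpcEngine2.class);
        return conf;
      }
    }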
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java index 501a77ec1d738..e2a169aeb3c5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java @@ -24,17 +24,17 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeUnit; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.Timer; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.thirdparty.protobuf.Message; import org.apache.hadoop.thirdparty.protobuf.TextFormat; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java index 446092ebe1cc2..1f60e3d468821 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java @@ -22,8 +22,8 @@ import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Exception thrown when too many exceptions occur while gathering diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 11bf46077d794..354b250bcc1f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -58,10 +58,10 @@ import org.apache.hadoop.log.LogThrottlingHelper; import org.apache.hadoop.log.LogThrottlingHelper.LogAction; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; 
+import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.TextFormat; /** @@ -73,9 +73,9 @@ public class QuorumJournalManager implements JournalManager { static final Logger LOG = LoggerFactory.getLogger(QuorumJournalManager.class); // This config is not publicly exposed - static final String QJM_RPC_MAX_TXNS_KEY = + public static final String QJM_RPC_MAX_TXNS_KEY = "dfs.ha.tail-edits.qjm.rpc.max-txns"; - static final int QJM_RPC_MAX_TXNS_DEFAULT = 5000; + public static final int QJM_RPC_MAX_TXNS_DEFAULT = 5000; // Maximum number of transactions to fetch at a time when using the // RPC edit fetch mechanism diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java index 61b60aaad875d..4b2a518ac0dff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java @@ -23,9 +23,9 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; -import com.google.common.base.Preconditions; -import com.google.common.collect.ComparisonChain; -import com.google.common.primitives.Booleans; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Booleans; /** * Compares responses to the prepareRecovery RPC. This is responsible for diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java index b7d652395c631..7f82bff3376f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * A {@link Storage} implementation for the {@link JournalNode}. 
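QJM_RPC_MAX_TXNS_KEY and its default are widened to public in the hunk above, presumably so that code outside the package (for example tests) can tune how many transactions are fetched per RPC-based edit tail. A minimal sketch of such an override, using an arbitrary value of 100 (not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;

    // Illustration only (class name and the value 100 are invented): caps the
    // number of transactions fetched per RPC edit-tailing call.
    public final class QjmTailTuning {
      private QjmTailTuning() {
      }

      public static Configuration limitTxnsPerRpc(Configuration conf) {
        conf.setInt(QuorumJournalManager.QJM_RPC_MAX_TXNS_KEY, 100);
        return conf;
      }
    }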
@@ -236,6 +236,10 @@ void format(NamespaceInfo nsInfo, boolean force) throws IOException { void analyzeStorage() throws IOException { this.state = sd.analyzeStorage(StartupOption.REGULAR, this); + refreshStorage(); + } + + void refreshStorage() throws IOException { if (state == StorageState.NORMAL) { readProperties(sd); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 70ed4c7159fa4..238a000afcfb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -71,10 +71,10 @@ import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.protobuf.TextFormat; /** @@ -264,9 +264,9 @@ void format(NamespaceInfo nsInfo, boolean force) throws IOException { */ @Override // Closeable public void close() throws IOException { - storage.close(); IOUtils.closeStream(committedTxnId); IOUtils.closeStream(curSegment); + storage.close(); } JNStorage getStorage() { @@ -1178,6 +1178,8 @@ public synchronized void doRollback() throws IOException { // directory will be renamed. It will be reopened lazily on next access. IOUtils.cleanupWithLogger(LOG, committedTxnId); storage.getJournalManager().doRollback(); + // HADOOP-17142: refresh properties after rollback performed. 
+ storage.refreshStorage(); } synchronized void discardSegments(long startTxId) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalFaultInjector.java index cefb7b5294406..f55933fa3536b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalFaultInjector.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 3df69f1448ad5..82d463805390d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index 36f7faaedb01e..d13c98f5c0f1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.slf4j.Logger; import org.apache.hadoop.classification.InterfaceAudience; @@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.net.NetUtils; @@ -85,7 +85,7 @@ public class JournalNodeRpcServer implements QJournalProtocol, LOG.info("RPC server is binding to " + bindHost + ":" + addr.getPort()); RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class, - ProtobufRpcEngine.class); + 
ProtobufRpcEngine2.class); QJournalProtocolServerSideTranslatorPB translator = new QJournalProtocolServerSideTranslatorPB(this); BlockingService service = QJournalProtocolService diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java index dc352c5d367c2..b4997eacd0719 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -505,7 +505,7 @@ private class JournalNodeProxy { @Override public InterQJournalProtocol run() throws IOException { RPC.setProtocolEngine(confCopy, InterQJournalProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); InterQJournalProtocolPB interQJournalProtocolPB = RPC.getProxy( InterQJournalProtocolPB.class, RPC.getProtocolVersion(InterQJournalProtocolPB.class), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java index 3cd7fffc587aa..e0b84d75fb212 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java index e477eee8437f7..d81bc98ff8aea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java @@ -27,7 +27,7 @@ import org.apache.hadoop.security.token.SecretManager; import 
org.apache.hadoop.security.token.Token; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.StorageType; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java index c01ab56ca2053..1d393783565ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.security.token.block; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; @@ -44,10 +44,10 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Timer; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.HashMultiset; -import com.google.common.collect.Multiset; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultiset; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multiset; /** * BlockTokenSecretManager can be instantiated in 2 modes, master mode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 49986e9d9827a..68f3dd6d67f12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -49,8 +49,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; import org.apache.hadoop.security.token.delegation.DelegationKey; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.ByteString; /** @@ -191,7 +191,7 @@ public SecretManagerState( } } - public synchronized void loadSecretManagerState(SecretManagerState state) + public synchronized void loadSecretManagerState(SecretManagerState state, Counter counter) throws IOException { Preconditions.checkState(!running, "Can't load state from image in a running SecretManager."); @@ -211,6 +211,7 @@ public synchronized void loadSecretManagerState(SecretManagerState state) id.setSequenceNumber(t.getSequenceNumber()); id.setMasterKeyId(t.getMasterKeyId()); addPersistedDelegationToken(id, t.getExpiryDate()); + counter.increment(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java index 2810434609acf..bcf535740d152 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMap.java @@ -16,8 +16,8 @@ */ package org.apache.hadoop.hdfs.server.aliasmap; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java index f6ba4239d7110..2ba22b1a90ba7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java @@ -19,7 +19,7 @@ import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; @@ -71,7 +71,7 @@ public InMemoryLevelDBAliasMapServer( public void start() throws IOException { RPC.setProtocolEngine(getConf(), AliasMapProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); AliasMapProtocolServerSideTranslatorPB aliasMapProtocolXlator = new AliasMapProtocolServerSideTranslatorPB(this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index e8b49718fedf7..8d97d2e1ab866 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.balancer; -import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS; import java.io.IOException; @@ -36,7 +36,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -68,7 +69,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /**

    The balancer is a tool that balances disk space usage on an HDFS cluster * when some datanodes become full or when new empty nodes join the cluster. @@ -281,6 +282,9 @@ static int getFailedTimesSinceLastSuccessfulBalance() { */ Balancer(NameNodeConnector theblockpool, BalancerParameters p, Configuration conf) { + // NameNode configuration parameters for balancing + getInt(conf, DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_KEY, + DFSConfigKeys.DFS_NAMENODE_GETBLOCKS_MAX_QPS_DEFAULT); final long movedWinWidth = getLong(conf, DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT); @@ -290,10 +294,6 @@ static int getFailedTimesSinceLastSuccessfulBalance() { final int dispatcherThreads = getInt(conf, DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY, DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT); - final int maxConcurrentMovesPerNode = getInt(conf, - DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, - DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT); - final long getBlocksSize = getLongBytes(conf, DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY, DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT); @@ -310,6 +310,13 @@ static int getFailedTimesSinceLastSuccessfulBalance() { DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_DEFAULT); + // DataNode configuration parameters for balancing + final int maxConcurrentMovesPerNode = getInt(conf, + DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, + DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT); + getLongBytes(conf, DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, + DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT); + this.nnc = theblockpool; this.dispatcher = new Dispatcher(theblockpool, p.getIncludedNodes(), @@ -589,35 +596,60 @@ void resetData(Configuration conf) { } static class Result { - final ExitStatus exitStatus; - final long bytesLeftToMove; - final long bytesBeingMoved; - final long bytesAlreadyMoved; + private final ExitStatus exitStatus; + private final long bytesLeftToMove; + private final long bytesBeingMoved; + private final long bytesAlreadyMoved; + private final long blocksMoved; Result(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved, - long bytesAlreadyMoved) { + long bytesAlreadyMoved, long blocksMoved) { this.exitStatus = exitStatus; this.bytesLeftToMove = bytesLeftToMove; this.bytesBeingMoved = bytesBeingMoved; this.bytesAlreadyMoved = bytesAlreadyMoved; + this.blocksMoved = blocksMoved; + } + + public ExitStatus getExitStatus() { + return exitStatus; + } + + public long getBytesLeftToMove() { + return bytesLeftToMove; } - void print(int iteration, PrintStream out) { - out.printf("%-24s %10d %19s %18s %17s%n", + public long getBytesBeingMoved() { + return bytesBeingMoved; + } + + public long getBytesAlreadyMoved() { + return bytesAlreadyMoved; + } + + public long getBlocksMoved() { + return blocksMoved; + } + + void print(int iteration, NameNodeConnector nnc, PrintStream out) { + out.printf("%-24s %10d %19s %18s %17s %17s %s%n", DateFormat.getDateTimeInstance().format(new Date()), iteration, StringUtils.byteDesc(bytesAlreadyMoved), StringUtils.byteDesc(bytesLeftToMove), - StringUtils.byteDesc(bytesBeingMoved)); + StringUtils.byteDesc(bytesBeingMoved), + blocksMoved, + nnc.getNameNodeUri()); } } Result newResult(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved) { return new Result(exitStatus, bytesLeftToMove, 
bytesBeingMoved, - dispatcher.getBytesMoved()); + dispatcher.getBytesMoved(), dispatcher.getBblocksMoved()); } Result newResult(ExitStatus exitStatus) { - return new Result(exitStatus, -1, -1, dispatcher.getBytesMoved()); + return new Result(exitStatus, -1, -1, dispatcher.getBytesMoved(), + dispatcher.getBblocksMoved()); } /** Run an iteration for all datanodes. */ @@ -652,8 +684,10 @@ Result runOneIteration() { System.out.println("No block can be moved. Exiting..."); return newResult(ExitStatus.NO_MOVE_BLOCK, bytesLeftToMove, bytesBeingMoved); } else { - LOG.info( "Will move " + StringUtils.byteDesc(bytesBeingMoved) + - " in this iteration"); + LOG.info("Will move {} in this iteration for {}", + StringUtils.byteDesc(bytesBeingMoved), nnc.toString()); + LOG.info("Total target DataNodes in this iteration: {}", + dispatcher.moveTasksTotal()); } /* For each pair of , start a thread that repeatedly @@ -688,7 +722,7 @@ Result runOneIteration() { * execute a {@link Balancer} to work through all datanodes once. */ static private int doBalance(Collection namenodes, - final BalancerParameters p, Configuration conf) + Collection nsIds, final BalancerParameters p, Configuration conf) throws IOException, InterruptedException { final long sleeptime = conf.getTimeDuration(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, @@ -704,14 +738,15 @@ static private int doBalance(Collection namenodes, LOG.info("excluded nodes = " + p.getExcludedNodes()); LOG.info("source nodes = " + p.getSourceNodes()); checkKeytabAndInit(conf); - System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved"); + System.out.println("Time Stamp Iteration#" + + " Bytes Already Moved Bytes Left To Move Bytes Being Moved" + + " NameNode"); List connectors = Collections.emptyList(); try { - connectors = NameNodeConnector.newNameNodeConnectors(namenodes, - Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, - p.getMaxIdleIteration()); - + connectors = NameNodeConnector.newNameNodeConnectors(namenodes, nsIds, + Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, + p.getMaxIdleIteration()); boolean done = false; for(int iteration = 0; !done; iteration++) { done = true; @@ -721,7 +756,7 @@ static private int doBalance(Collection namenodes, || p.getBlockPools().contains(nnc.getBlockpoolID())) { final Balancer b = new Balancer(nnc, p, conf); final Result r = b.runOneIteration(); - r.print(iteration, System.out); + r.print(iteration, nnc, System.out); // clean all lists b.resetData(conf); @@ -751,9 +786,15 @@ static private int doBalance(Collection namenodes, } static int run(Collection namenodes, final BalancerParameters p, - Configuration conf) throws IOException, InterruptedException { + Configuration conf) throws IOException, InterruptedException { + return run(namenodes, null, p, conf); + } + + static int run(Collection namenodes, Collection nsIds, + final BalancerParameters p, Configuration conf) + throws IOException, InterruptedException { if (!p.getRunAsService()) { - return doBalance(namenodes, p, conf); + return doBalance(namenodes, nsIds, p, conf); } if (!serviceRunning) { serviceRunning = true; @@ -772,7 +813,7 @@ static int run(Collection namenodes, final BalancerParameters p, while (serviceRunning) { try { - int retCode = doBalance(namenodes, p, conf); + int retCode = doBalance(namenodes, nsIds, p, conf); if (retCode < 0) { LOG.info("Balance failed, error code: " + retCode); failedTimesSinceLastSuccessfulBalance++; @@ -856,7 +897,8 @@ public int run(String[] args) { 
checkReplicationPolicyCompatibility(conf); final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); - return Balancer.run(namenodes, parse(args), conf); + final Collection nsIds = DFSUtilClient.getNameServiceIds(conf); + return Balancer.run(namenodes, nsIds, parse(args), conf); } catch (IOException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.IO_EXCEPTION.getExitCode(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index c222270882fc0..e19fbeb956fd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -83,8 +83,8 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** Dispatching block replica moves between datanodes. */ @InterfaceAudience.Private @@ -392,7 +392,7 @@ private void dispatch() { sendRequest(out, eb, accessToken); receiveResponse(in); - nnc.getBytesMoved().addAndGet(reportedBlock.getNumBytes()); + nnc.addBytesMoved(reportedBlock.getNumBytes()); target.getDDatanode().setHasSuccess(); LOG.info("Successfully moved " + this); } catch (IOException e) { @@ -1064,6 +1064,10 @@ long getBytesMoved() { return nnc.getBytesMoved().get(); } + long getBblocksMoved() { + return nnc.getBlocksMoved().get(); + } + long bytesToMove() { Preconditions.checkState( storageGroupMap.size() >= sources.size() + targets.size(), @@ -1083,6 +1087,14 @@ void add(Source source, StorageGroup target) { targets.add(target); } + public int moveTasksTotal() { + int b = 0; + for (Source src : sources) { + b += src.tasks.size(); + } + return b; + } + private boolean shouldIgnore(DatanodeInfo dn) { // ignore out-of-service nodes final boolean outOfService = !dn.isInService(); @@ -1164,12 +1176,13 @@ public boolean dispatchAndCheckContinue() throws InterruptedException { */ private long dispatchBlockMoves() throws InterruptedException { final long bytesLastMoved = getBytesMoved(); + final long blocksLastMoved = getBblocksMoved(); final Future[] futures = new Future[sources.size()]; int concurrentThreads = Math.min(sources.size(), ((ThreadPoolExecutor)dispatchExecutor).getCorePoolSize()); assert concurrentThreads > 0 : "Number of concurrent threads is 0."; - LOG.debug("Balancer concurrent dispatcher threads = {}", concurrentThreads); + LOG.info("Balancer concurrent dispatcher threads = {}", concurrentThreads); // Determine the size of each mover thread pool per target int threadsPerTarget = maxMoverThreads/targets.size(); @@ -1211,6 +1224,9 @@ public void run() { // wait for all reportedBlock moving to be done waitForMoveCompletion(targets); + LOG.info("Total bytes (blocks) moved in this iteration {} ({})", + StringUtils.byteDesc(getBytesMoved() - bytesLastMoved), + (getBblocksMoved() - blocksLastMoved)); return getBytesMoved() - bytesLastMoved; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index 2844ad5a94350..7f54c63303ca6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -25,14 +25,20 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.RateLimiter; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.RateLimiter; +import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -57,7 +63,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The class provides utilities for accessing a NameNode. @@ -100,6 +106,32 @@ public static List newNameNodeConnectors( return connectors; } + public static List newNameNodeConnectors( + Collection namenodes, Collection nsIds, String name, + Path idPath, Configuration conf, int maxIdleIterations) + throws IOException { + final List connectors = new ArrayList( + namenodes.size()); + Map uriToNsId = new HashMap<>(); + if (nsIds != null) { + for (URI uri : namenodes) { + for (String nsId : nsIds) { + if (uri.getAuthority().equals(nsId)) { + uriToNsId.put(uri, nsId); + } + } + } + } + for (URI uri : namenodes) { + String nsId = uriToNsId.get(uri); + NameNodeConnector nnc = new NameNodeConnector(name, uri, nsId, idPath, + null, conf, maxIdleIterations); + nnc.getKeyManager().startBlockKeyUpdater(); + connectors.add(nnc); + } + return connectors; + } + @VisibleForTesting public static void setWrite2IdFile(boolean write2IdFile) { NameNodeConnector.write2IdFile = write2IdFile; @@ -114,6 +146,14 @@ public static void checkOtherInstanceRunning(boolean toCheck) { private final String blockpoolID; private final BalancerProtocols namenode; + /** + * If set requestToStandby true, Balancer will getBlocks from + * Standby NameNode only and it can reduce the performance impact of Active + * NameNode, especially in a busy HA mode cluster. 
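The new newNameNodeConnectors() overload above pairs each NameNode URI with a nameservice ID by matching the URI authority against the configured IDs before constructing connectors. A self-contained sketch of that authority-matching step; the URIs and IDs below are made up for illustration:

    import java.net.URI;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class NsIdMatchSketch {
      public static void main(String[] args) {
        List<URI> namenodes = Arrays.asList(
            URI.create("hdfs://ns1"), URI.create("hdfs://nn-host:8020"));
        List<String> nsIds = Arrays.asList("ns1", "ns2");

        // Pair each URI with a nameservice ID when the authority matches one.
        Map<URI, String> uriToNsId = new HashMap<>();
        for (URI uri : namenodes) {
          for (String nsId : nsIds) {
            if (nsId.equals(uri.getAuthority())) {
              uriToNsId.put(uri, nsId);
            }
          }
        }
        // hdfs://ns1 maps to "ns1"; the host:port URI stays unmapped (null nsId).
        System.out.println(uriToNsId);
      }
    }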
+ */ + private boolean requestToStandby; + private String nsId; + private Configuration config; private final KeyManager keyManager; final AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false); @@ -122,6 +162,7 @@ public static void checkOtherInstanceRunning(boolean toCheck) { private OutputStream out; private final List targetPaths; private final AtomicLong bytesMoved = new AtomicLong(); + private final AtomicLong blocksMoved = new AtomicLong(); private final int maxNotChangedIterations; private int notChangedIterations = 0; @@ -149,6 +190,11 @@ public NameNodeConnector(String name, URI nameNodeUri, Path idPath, this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri, BalancerProtocols.class, fallbackToSimpleAuth).getProxy(); + this.requestToStandby = conf.getBoolean( + DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY, + DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_DEFAULT); + this.config = conf; + this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf); final NamespaceInfo namespaceinfo = namenode.versionRequest(); @@ -167,6 +213,14 @@ public NameNodeConnector(String name, URI nameNodeUri, Path idPath, } } + public NameNodeConnector(String name, URI nameNodeUri, String nsId, + Path idPath, List targetPaths, + Configuration conf, int maxNotChangedIterations) + throws IOException { + this(name, nameNodeUri, idPath, targetPaths, conf, maxNotChangedIterations); + this.nsId = nsId; + } + public DistributedFileSystem getDistributedFileSystem() { return fs; } @@ -180,13 +234,62 @@ AtomicLong getBytesMoved() { return bytesMoved; } + AtomicLong getBlocksMoved() { + return blocksMoved; + } + + public void addBytesMoved(long numBytes) { + bytesMoved.addAndGet(numBytes); + blocksMoved.incrementAndGet(); + } + + public URI getNameNodeUri() { + return nameNodeUri; + } + /** @return blocks with locations. */ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long minBlockSize) throws IOException { if (getBlocksRateLimiter != null) { getBlocksRateLimiter.acquire(); } - return namenode.getBlocks(datanode, size, minBlockSize); + boolean isRequestStandby = false; + NamenodeProtocol nnproxy = null; + try { + if (requestToStandby && nsId != null + && HAUtil.isHAEnabled(config, nsId)) { + List namenodes = + HAUtil.getProxiesForAllNameNodesInNameservice(config, nsId); + for (ClientProtocol proxy : namenodes) { + try { + if (proxy.getHAServiceState().equals( + HAServiceProtocol.HAServiceState.STANDBY)) { + NamenodeProtocol sbn = NameNodeProxies.createNonHAProxy( + config, RPC.getServerAddress(proxy), NamenodeProtocol.class, + UserGroupInformation.getCurrentUser(), false).getProxy(); + nnproxy = sbn; + isRequestStandby = true; + break; + } + } catch (Exception e) { + // Ignore the exception while connecting to a namenode. 
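getBlocks() above now tries to locate a Standby NameNode in the nameservice and fetch block lists from it, ignoring probe failures and falling back to the regular (Active) proxy when no standby can be reached (the fallback branch follows just below). A simplified, self-contained sketch of that prefer-standby-then-fall-back selection; the Endpoint type and state probe are stand-ins, not the HAServiceProtocol API:

    import java.util.Arrays;
    import java.util.List;

    public class StandbyPreferenceSketch {
      enum HAState { ACTIVE, STANDBY }

      static final class Endpoint {
        final String name; final HAState state;
        Endpoint(String name, HAState state) { this.name = name; this.state = state; }
      }

      // Probe may fail (network error, stopped node, ...); failures are ignored, as above.
      static HAState probeState(Endpoint e) {
        return e.state;
      }

      // Return the first endpoint that reports STANDBY, or the default one otherwise.
      static Endpoint chooseSource(List<Endpoint> all, Endpoint dflt) {
        for (Endpoint e : all) {
          try {
            if (probeState(e) == HAState.STANDBY) {
              return e;          // read from the standby to spare the active node
            }
          } catch (RuntimeException ignored) {
            // keep scanning the remaining endpoints
          }
        }
        return dflt;             // fall back to the normal (active) proxy
      }

      public static void main(String[] args) {
        Endpoint active = new Endpoint("nn1", HAState.ACTIVE);
        Endpoint standby = new Endpoint("nn2", HAState.STANDBY);
        System.out.println(chooseSource(Arrays.asList(active, standby), active).name);
      }
    }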
+ LOG.debug("Error while connecting to namenode", e); + } + } + if (nnproxy == null) { + LOG.warn("Request #getBlocks to Standby NameNode but meet exception," + + " will fallback to normal way."); + nnproxy = namenode; + } + } else { + nnproxy = namenode; + } + return nnproxy.getBlocks(datanode, size, minBlockSize); + } finally { + if (isRequestStandby) { + LOG.info("Request #getBlocks to Standby NameNode success."); + } + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java index 21c110f5ab556..6926c3e01c448 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java @@ -27,7 +27,7 @@ import java.util.Random; import java.util.Set; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -87,9 +87,9 @@ protected DatanodeDescriptor chooseDataNode(final String scope, Preconditions.checkArgument(clusterMap instanceof DFSNetworkTopology); DFSNetworkTopology dfsClusterMap = (DFSNetworkTopology)clusterMap; DatanodeDescriptor a = (DatanodeDescriptor) dfsClusterMap - .chooseRandomWithStorageType(scope, excludedNode, type); + .chooseRandomWithStorageTypeTwoTrial(scope, excludedNode, type); DatanodeDescriptor b = (DatanodeDescriptor) dfsClusterMap - .chooseRandomWithStorageType(scope, excludedNode, type); + .chooseRandomWithStorageTypeTwoTrial(scope, excludedNode, type); return select(a, b, false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java index bec6ec8368120..0ac1d53e14267 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.HdfsConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index dc6cf3266a5fc..c6a7bb52b7045 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.NoSuchElementException; -import 
com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java index 7378e6f21b765..f830678b429b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index ce186a7b3d1d2..5a133412a85d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index ad61c716a8019..7461d46dda028 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -126,8 +126,8 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1856,6 +1856,16 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b, // In case of 3, rbw block will be deleted and valid block can be replicated if (hasEnoughLiveReplicas || hasMoreCorruptReplicas || corruptedDuringWrite) { + if (b.getStored().isStriped()) { + // If the block is an EC block, the whole block group is marked + // corrupted, so if this block is getting deleted, remove the block + // from corrupt replica map explicitly, since removal of the 
+ // block from corrupt replicas may be delayed if the blocks are on + // stale storage due to failover or any other reason. + corruptReplicas.removeFromCorruptReplicasMap(b.getStored(), node); + BlockInfoStriped blk = (BlockInfoStriped) getStoredBlock(b.getStored()); + blk.removeStorage(storageInfo); + } // the block is over-replicated so invalidate the replicas immediately invalidateBlock(b, node, numberOfReplicas); } else if (isPopulatingReplQueues()) { @@ -2517,7 +2527,7 @@ void processPendingReconstructions() { * with the most up-to-date block information (e.g. genstamp). */ BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]); - if (bi == null) { + if (bi == null || bi.isDeleted()) { continue; } NumberReplicas num = countNodes(timedOutItems[i]); @@ -2745,6 +2755,7 @@ public boolean processReport(final DatanodeID nodeID, storageInfo = node.updateStorage(storage); } if (namesystem.isInStartupSafeMode() + && !StorageType.PROVIDED.equals(storageInfo.getStorageType()) && storageInfo.getBlockReportCount() > 0) { blockLog.info("BLOCK* processReport 0x{}: " + "discarded non-initial block report from {}" @@ -3156,10 +3167,11 @@ private void reportDiffSortedInner( // If the block is an out-of-date generation stamp or state, // but we're the standby, we shouldn't treat it as corrupt, // but instead just queue it for later processing. - // TODO: Pretty confident this should be s/storedBlock/block below, - // since we should be postponing the info of the reported block, not - // the stored block. See HDFS-6289 for more context. - queueReportedBlock(storageInfo, storedBlock, reportedState, + // Storing the reported block for later processing, as that is what + // comes from the IBR / FBR and hence what we should use to compare + // against the memory state. + // See HDFS-6289 and HDFS-15422 for more context. + queueReportedBlock(storageInfo, replica, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { toCorrupt.add(c); @@ -4221,10 +4233,11 @@ private boolean processAndHandleReportedBlock( // If the block is an out-of-date generation stamp or state, // but we're the standby, we shouldn't treat it as corrupt, // but instead just queue it for later processing. - // TODO: Pretty confident this should be s/storedBlock/block below, - // since we should be postponing the info of the reported block, not - // the stored block. See HDFS-6289 for more context. - queueReportedBlock(storageInfo, storedBlock, reportedState, + // Storing the reported block for later processing, as that is what + // comes from the IBR / FBR and hence what we should use to compare + // against the memory state. + // See HDFS-6289 and HDFS-15422 for more context. 
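Two behavioural changes above are easy to miss among the import moves: timed-out pending reconstructions are now skipped when the stored block is gone or deleted (bi == null || bi.isDeleted()), and the standby's queueReportedBlock() call now queues the reported replica rather than the stored block (HDFS-6289/HDFS-15422). A self-contained sketch of the first guard, with a stub type standing in for BlockInfo:

    import java.util.Arrays;
    import java.util.List;

    public class SkipDeletedSketch {
      static final class BlockInfoStub {
        final long id; final boolean deleted;
        BlockInfoStub(long id, boolean deleted) { this.id = id; this.deleted = deleted; }
        boolean isDeleted() { return deleted; }
      }

      public static void main(String[] args) {
        List<BlockInfoStub> timedOut =
            Arrays.asList(new BlockInfoStub(1, false), null, new BlockInfoStub(3, true));
        for (BlockInfoStub bi : timedOut) {
          // Mirror of the new guard: entries that vanished or were deleted between
          // scheduling and timeout are dropped instead of being re-queued.
          if (bi == null || bi.isDeleted()) {
            continue;
          }
          System.out.println("re-schedule reconstruction for block " + bi.id);
        }
      }
    }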
+ queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { markBlockAsCorrupt(c, storageInfo, node); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerFaultInjector.java index 957c5c0c3733c..a529eca6d6ceb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerFaultInjector.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java index aecdb59df066f..cd4c521807ea2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java @@ -35,8 +35,8 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.util.Daemon; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 563183882765c..07df43b83dcf8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -25,7 +25,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index a1ea63de22fd7..a1a83b042d0ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -22,7 +22,7 @@ import java.util.*; import java.util.concurrent.TimeUnit; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; @@ -35,7 +35,7 @@ import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The class is responsible for choosing the desired number of targets diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java index b204450491a7b..dad877fdc76fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java @@ -237,9 +237,8 @@ public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs, // only one rack return new BlockPlacementStatusDefault(1, 1, 1); } - // 1. Check that all locations are different. - // 2. Count locations on different racks. - Set racks = new TreeSet<>(); + // Count locations on different racks. + Set racks = new HashSet<>(); for (DatanodeInfo dn : locs) { racks.add(dn.getNetworkLocation()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java index 194f6ba9c874a..39f15191534d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java @@ -46,6 +46,11 @@ protected BlockPlacementPolicyWithNodeGroup() { public void initialize(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { + if (!(clusterMap instanceof NetworkTopologyWithNodeGroup)) { + throw new IllegalArgumentException( + "Configured cluster topology should be " + + NetworkTopologyWithNodeGroup.class.getName()); + } super.initialize(conf, stats, clusterMap, host2datanodeMap); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java index 2a4b6e8455226..3560a36dde1bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.util.Time; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java index 5af1bf281b150..ca08fe9a5398c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.XAttr; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java index a871390ae2274..3ce5ef07acdca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 35e4a2e92b89b..eab58124cb11c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -55,7 +55,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Scans the namesystem, scheduling blocks to be cached as appropriate. 
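The three hunks above (BlockStoragePolicySuite, BlockToMarkCorrupt, CacheReplicationMonitor), like many in this part of the diff, only swap Guava imports for the shaded org.apache.hadoop.thirdparty.com.google.common packages. A minimal sketch of what code looks like after the relocation, assuming the hadoop-thirdparty shaded-guava artifact is on the classpath (the artifact itself is an assumption; the diff only shows the package names):

    // Before: import com.google.common.base.Preconditions;
    // After : the shaded package, so Hadoop no longer leaks its own Guava version.
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

    public class ShadedGuavaSketch {
      static int positive(int n) {
        Preconditions.checkArgument(n > 0, "n must be positive, got %s", n);
        return n;
      }

      public static void main(String[] args) {
        System.out.println(positive(42));
      }
    }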
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java index d607789420c20..662e2e471dfbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; -import com.google.common.collect.UnmodifiableIterator; -import com.google.common.collect.Iterables; -import com.google.common.collect.Collections2; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.UnmodifiableIterator; +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -40,7 +39,7 @@ import java.util.Iterator; import java.util.Map; -import com.google.common.base.Predicate; + import org.apache.hadoop.hdfs.util.CombinedHostsFileReader; @@ -82,37 +81,26 @@ synchronized void add(InetAddress addr, // If the includes list is empty, act as if everything is in the // includes list. synchronized boolean isIncluded(final InetSocketAddress address) { - return emptyInServiceNodeLists || Iterables.any( - allDNs.get(address.getAddress()), - new Predicate() { - public boolean apply(DatanodeAdminProperties input) { - return input.getPort() == 0 || - input.getPort() == address.getPort(); - } - }); + return emptyInServiceNodeLists || allDNs.get(address.getAddress()) + .stream().anyMatch( + input -> input.getPort() == 0 || + input.getPort() == address.getPort()); } synchronized boolean isExcluded(final InetSocketAddress address) { - return Iterables.any(allDNs.get(address.getAddress()), - new Predicate() { - public boolean apply(DatanodeAdminProperties input) { - return input.getAdminState().equals( - AdminStates.DECOMMISSIONED) && - (input.getPort() == 0 || - input.getPort() == address.getPort()); - } - }); + return allDNs.get(address.getAddress()).stream().anyMatch( + input -> input.getAdminState().equals( + AdminStates.DECOMMISSIONED) && + (input.getPort() == 0 || + input.getPort() == address.getPort())); } synchronized String getUpgradeDomain(final InetSocketAddress address) { - Iterable datanode = Iterables.filter( - allDNs.get(address.getAddress()), - new Predicate() { - public boolean apply(DatanodeAdminProperties input) { - return (input.getPort() == 0 || - input.getPort() == address.getPort()); - } - }); + Iterable datanode = + allDNs.get(address.getAddress()).stream().filter( + input -> (input.getPort() == 0 || + input.getPort() == address.getPort())).collect( + Collectors.toList()); return datanode.iterator().hasNext() ? 
datanode.iterator().next().getUpgradeDomain() : null; } @@ -127,36 +115,22 @@ public Iterator iterator() { } Iterable getExcludes() { - return new Iterable() { - @Override - public Iterator iterator() { - return new HostIterator( - Collections2.filter(allDNs.entries(), - new Predicate>() { - public boolean apply(java.util.Map.Entry entry) { - return entry.getValue().getAdminState().equals( - AdminStates.DECOMMISSIONED); - } - } - )); - } - }; + return () -> new HostIterator( + allDNs.entries().stream().filter( + entry -> entry.getValue().getAdminState().equals( + AdminStates.DECOMMISSIONED)).collect( + Collectors.toList())); } synchronized long getMaintenanceExpireTimeInMS( final InetSocketAddress address) { - Iterable datanode = Iterables.filter( - allDNs.get(address.getAddress()), - new Predicate() { - public boolean apply(DatanodeAdminProperties input) { - return input.getAdminState().equals( + Iterable datanode = + allDNs.get(address.getAddress()).stream().filter( + input -> input.getAdminState().equals( AdminStates.IN_MAINTENANCE) && (input.getPort() == 0 || - input.getPort() == address.getPort()); - } - }); + input.getPort() == address.getPort())).collect( + Collectors.toList()); // if DN isn't set to maintenance state, ignore MaintenanceExpireTimeInMS // set in the config. return datanode.iterator().hasNext() ? diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index fc31584ae694c..fdc8bb7491c15 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.Server; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Stores information about all corrupt blocks in the File System. 
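The CombinedHostFileManager hunks above replace anonymous Guava Predicate classes with java.util.stream: membership checks become anyMatch() and filtered views become collected lists. A self-contained sketch of the same rewrite on a simplified datanode-properties type (names are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class StreamPredicateSketch {
      static final class DnProps {
        final int port; final boolean decommissioned;
        DnProps(int port, boolean decommissioned) {
          this.port = port; this.decommissioned = decommissioned;
        }
      }

      public static void main(String[] args) {
        List<DnProps> entries = Arrays.asList(new DnProps(0, false), new DnProps(9866, true));
        int queryPort = 9866;

        // anyMatch replaces Iterables.any(..., new Predicate<...>() { ... }).
        boolean excluded = entries.stream().anyMatch(
            e -> e.decommissioned && (e.port == 0 || e.port == queryPort));

        // filter(...).collect(...) replaces Iterables.filter(...).
        List<DnProps> matching = entries.stream()
            .filter(e -> e.port == 0 || e.port == queryPort)
            .collect(Collectors.toList());

        System.out.println(excluded + " / " + matching.size());
      }
    }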
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java index af2c12f35c78a..c04f3daabf70e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeFile; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java index a5650d1c4865b..b372e738b1dfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.namenode.INode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index 0771c28243a3c..799e0d5278235 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.util.Time.monotonicNow; import java.util.Queue; @@ -33,8 +33,8 @@ import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Manages decommissioning and maintenance state for DataNodes. 
A background diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 3fa9b3ad51dd2..c89f0efa1cb64 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -30,7 +30,7 @@ import java.util.Queue; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -237,7 +237,8 @@ public Type getType() { */ public DatanodeDescriptor(DatanodeID nodeID) { super(nodeID); - updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null); + setLastUpdate(Time.now()); + setLastUpdateMonotonic(Time.monotonicNow()); } /** @@ -248,7 +249,8 @@ public DatanodeDescriptor(DatanodeID nodeID) { public DatanodeDescriptor(DatanodeID nodeID, String networkLocation) { super(nodeID, networkLocation); - updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null); + setLastUpdate(Time.now()); + setLastUpdateMonotonic(Time.monotonicNow()); } public CachedBlocksList getPendingCached() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index f5cf1d5744b07..22750ec0aeeba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION; import static org.apache.hadoop.util.Time.monotonicNow; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.net.InetAddresses; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -137,6 +137,9 @@ public class DatanodeManager { /** Whether or not to consider lad for reading. */ private final boolean readConsiderLoad; + /** Whether or not to consider storageType for reading. */ + private final boolean readConsiderStorageType; + /** * Whether or not to avoid using stale DataNodes for writing. * Note that, even if this is configured, the policy may be @@ -320,6 +323,17 @@ public class DatanodeManager { this.readConsiderLoad = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY, DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_DEFAULT); + this.readConsiderStorageType = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, + DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_DEFAULT); + if (readConsiderLoad && readConsiderStorageType) { + LOG.warn( + "{} and {} are incompatible and only one can be enabled. " + + "Both are currently enabled. 
{} will be ignored.", + DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY, + DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, + DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY); + } this.avoidStaleDataNodesForWrite = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT); @@ -524,7 +538,7 @@ private void sortLocatedBlock(final LocatedBlock lb, String targetHost, } } - DatanodeInfo[] di = lb.getLocations(); + DatanodeInfoWithStorage[] di = lb.getLocations(); // Move decommissioned/stale datanodes to the bottom Arrays.sort(di, comparator); @@ -547,11 +561,15 @@ private void sortLocatedBlock(final LocatedBlock lb, String targetHost, lb.updateCachedStorageInfo(); } - private Consumer> createSecondaryNodeSorter() { - Consumer> secondarySort = - list -> Collections.shuffle(list); + private Consumer> createSecondaryNodeSorter() { + Consumer> secondarySort = null; + if (readConsiderStorageType) { + Comparator comp = + Comparator.comparing(DatanodeInfoWithStorage::getStorageType); + secondarySort = list -> Collections.sort(list, comp); + } if (readConsiderLoad) { - Comparator comp = + Comparator comp = Comparator.comparingInt(DatanodeInfo::getXceiverCount); secondarySort = list -> Collections.sort(list, comp); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index 3a56ef16c8efd..ee20ada2e8f87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.util.FoldedTreeSet; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A Datanode has one or more storages. A storage in the Datanode is represented diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java index ccdcf5451ccd8..41bb7d3428c39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet; import org.slf4j.Logger; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Maps a datnode to the set of excess redundancy details. 
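The DatanodeManager hunks above add a read-time storage-type preference next to the existing load-based one: a readConsiderStorageType flag driven by the DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_* keys, a warning when both flags are enabled, and a secondary sorter built as a Comparator over DatanodeInfoWithStorage. A self-contained sketch of choosing and applying such a secondary sort as a Consumer<List<T>>, with stand-in types; as in the hunk, the load comparator takes precedence when both flags are set:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.function.Consumer;

    public class SecondarySortSketch {
      static final class Node {
        final String storageType; final int xceiverCount;
        Node(String storageType, int xceiverCount) {
          this.storageType = storageType; this.xceiverCount = xceiverCount;
        }
        public String toString() { return storageType + ":" + xceiverCount; }
      }

      static Consumer<List<Node>> secondarySorter(boolean byStorageType, boolean byLoad) {
        Consumer<List<Node>> sorter = list -> { };        // default: no secondary ordering
        if (byStorageType) {
          Comparator<Node> c = Comparator.comparing(n -> n.storageType);
          sorter = list -> list.sort(c);
        }
        if (byLoad) {                                     // load wins if both flags are set
          Comparator<Node> c = Comparator.comparingInt(n -> n.xceiverCount);
          sorter = list -> list.sort(c);
        }
        return sorter;
      }

      public static void main(String[] args) {
        List<Node> nodes = new ArrayList<>(Arrays.asList(new Node("SSD", 2), new Node("DISK", 7)));
        secondarySorter(true, false).accept(nodes);
        System.out.println(nodes);    // [DISK:7, SSD:2] when ordering by storage-type name
      }
    }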
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index 9e4d867a0bfc4..5da47c4b2a8a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Manage the heartbeats received from datanodes. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java index 4ead0ba6f704a..57b690262a6d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java index cf7cfac95ab8e..af0c92df5e272 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java @@ -17,15 +17,12 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Function; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Iterators; -import com.google.common.collect.Multimap; -import com.google.common.collect.UnmodifiableIterator; -import javax.annotation.Nullable; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.UnmodifiableIterator; + import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Collection; @@ -101,14 +98,16 @@ public InetSocketAddress next() { @Override public String toString() { StringBuilder sb = new StringBuilder("HostSet("); - Joiner.on(",").appendTo(sb, Iterators.transform(iterator(), - new Function() { - @Override - public String apply(@Nullable InetSocketAddress addr) { - assert addr != null; - return addr.getAddress().getHostAddress() + ":" + addr.getPort(); - } - })); - return sb.append(")").toString(); + Iterator iter = iterator(); + String sep = ""; + while (iter.hasNext()) { + InetSocketAddress addr = iter.next(); + sb.append(sep); + 
sb.append(addr.getAddress().getHostAddress()); + sb.append(':'); + sb.append(addr.getPort()); + sep = ","; + } + return sb.append(')').toString(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index bbe729c3332cc..1ce967a4f3703 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -39,7 +39,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdfs.DFSUtil; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Keeps a Collection for every named machine containing blocks diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java index 8cf9dd40ca6d8..d719e937f2e86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -382,17 +383,18 @@ boolean remove(BlockInfo block, int priLevel, int oldExpectedReplicas) { } else { // Try to remove the block from all queues if the block was // not found in the queue for the given priority level. + boolean found = false; for (int i = 0; i < LEVEL; i++) { if (i != priLevel && priorityQueues.get(i).remove(block)) { NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.LowRedundancyBlock.remove: Removing block" + " {} from priority queue {}", block, i); decrementBlockStat(block, i, oldExpectedReplicas); - return true; + found = true; } } + return found; } - return false; } private void decrementBlockStat(BlockInfo blockInfo, int priLevel, @@ -499,6 +501,8 @@ synchronized List> chooseLowRedundancyBlocks( * the block count is met or iteration reaches the end of the lowest priority * list, in which case bookmarks for each block list are reset to the heads * of their respective lists. + * If a block is deleted (has invalid bcId), it will be removed from the low + * redundancy queues. * * @param blocksToProcess - number of blocks to fetch from low redundancy * blocks. @@ -514,21 +518,32 @@ synchronized List> chooseLowRedundancyBlocks( int count = 0; int priority = 0; + HashSet toRemove = new HashSet<>(); for (; count < blocksToProcess && priority < LEVEL; priority++) { - if (priority == QUEUE_WITH_CORRUPT_BLOCKS) { - // do not choose corrupted blocks. - continue; - } - // Go through all blocks that need reconstructions with current priority. // Set the iterator to the first unprocessed block at this priority level + // We do not want to skip QUEUE_WITH_CORRUPT_BLOCKS because we still need + // to look for deleted blocks if any. 
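The LowRedundancyBlocks.remove() fix above keeps scanning the remaining priority queues instead of returning on the first hit, so a block that ended up in more than one queue is purged from all of them while the method still reports whether anything was removed. A self-contained sketch of that scan-all-then-report pattern (plain sets stand in for the priority queues):

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class RemoveFromAllQueuesSketch {
      static boolean removeFromAll(List<Set<Long>> queues, long blockId, int skipLevel) {
        boolean found = false;
        for (int i = 0; i < queues.size(); i++) {
          if (i != skipLevel && queues.get(i).remove(blockId)) {
            found = true;     // keep going: the block may sit in several queues
          }
        }
        return found;
      }

      public static void main(String[] args) {
        List<Set<Long>> queues = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
          queues.add(new HashSet<>());
        }
        queues.get(0).add(42L);
        queues.get(2).add(42L);
        System.out.println(removeFromAll(queues, 42L, 1));  // true: removed from levels 0 and 2
      }
    }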
+ final boolean inCorruptLevel = (QUEUE_WITH_CORRUPT_BLOCKS == priority); final Iterator i = priorityQueues.get(priority).getBookmark(); final List blocks = new LinkedList<>(); - blocksToReconstruct.add(blocks); - // Loop through all remaining blocks in the list. + if (!inCorruptLevel) { + blocksToReconstruct.add(blocks); + } for(; count < blocksToProcess && i.hasNext(); count++) { - blocks.add(i.next()); + BlockInfo block = i.next(); + if (block.isDeleted()) { + toRemove.add(block); + continue; + } + if (!inCorruptLevel) { + blocks.add(block); + } + } + for (BlockInfo bInfo : toRemove) { + remove(bInfo, priority); } + toRemove.clear(); } if (priority == LEVEL || resetIterators) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java index 6e9dfa2c0089e..8a8501f4aeee5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * In the Standby Node, we can receive messages about blocks diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java index 6e1af5729aae5..acf05986397e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java @@ -29,7 +29,7 @@ import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.Daemon; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingRecoveryBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingRecoveryBlocks.java index 3f5f27c819000..8a432cead742b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingRecoveryBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingRecoveryBlocks.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.util.LightWeightHashSet; import org.apache.hadoop.util.Time; import org.slf4j.Logger; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java index c8c9bb5d1cc74..31ab66d93f6b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java @@ -29,7 +29,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentSkipListMap; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index 894121eeee7d0..08ebf8e481d22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -23,10 +23,10 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.common.primitives.Doubles; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Doubles; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java index 03a6918152f40..5b30b738c7ab5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java @@ -22,9 +22,9 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 78d4289a047cb..b6b4463df108f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java index 1a51b46e585ea..e9f1cf09e86d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HostRestrictingAuthorizationFilter.java @@ -229,9 +229,14 @@ public void handleInteraction(HttpInteraction interaction) throws IOException, ServletException { final String address = interaction.getRemoteAddr(); final String query = interaction.getQueryString(); - final String path = - interaction.getRequestURI() - .substring(WebHdfsFileSystem.PATH_PREFIX.length()); + final String uri = interaction.getRequestURI(); + if (!uri.startsWith(WebHdfsFileSystem.PATH_PREFIX)) { + LOG.trace("Rejecting interaction; wrong URI: {}", uri); + interaction.sendError(HttpServletResponse.SC_NOT_FOUND, + "The request URI must start with " + WebHdfsFileSystem.PATH_PREFIX); + return; + } + final String path = uri.substring(WebHdfsFileSystem.PATH_PREFIX.length()); String user = interaction.getRemoteUser(); LOG.trace("Got request user: {}, remoteIp: {}, query: {}, path: {}", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index e7da44e689a60..ea10f011f9581 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -53,8 +53,8 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java index 28871e5da32a0..23911c0ac74ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * Common class for storage information. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java index e9f9bfb830cdc..14cc1c4576d85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java @@ -36,8 +36,8 @@ import java.util.Map; import java.util.Set; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java index 4d65142c95848..41b5d33785cea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java @@ -54,7 +54,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is used for block maps stored as text files, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 3199e53890f30..40046b0d8c454 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; @@ -206,6 +206,7 @@ String getBlockPoolId(boolean quiet) { if (id != null) { return id; } + DataNodeFaultInjector.get().delayWhenOfferServiceHoldLock(); readLock(); try { if (bpNSInfo != null) { @@ -382,6 +383,7 @@ void verifyAndSetNamespaceInfo(BPServiceActor actor, NamespaceInfo 
nsInfo) } try { + DataNodeFaultInjector.get().delayWhenOfferServiceHoldLock(); if (setNamespaceInfo(nsInfo) == null) { boolean success = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index a436c94dc206d..49156c251a322 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -71,8 +71,8 @@ import org.apache.hadoop.util.VersionUtil; import org.slf4j.Logger; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * A thread per active or standby namenode to perform: @@ -544,7 +544,7 @@ HeartbeatResponse sendHeartBeat(boolean requestBlockReportLease) dn.getFSDataset().getCacheCapacity(), dn.getFSDataset().getCacheUsed(), dn.getXmitsInProgress(), - dn.getXceiverCount(), + dn.getActiveTransferThreadCount(), numFailedVolumes, volumeFailureSummary, requestBlockReportLease, @@ -572,11 +572,11 @@ void start() { } bpThread = new Thread(this); bpThread.setDaemon(true); // needed for JUnit testing - bpThread.start(); if (lifelineSender != null) { lifelineSender.start(); } + bpThread.start(); } private String formatThreadName( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java index 3388855f8fbdf..1895b449c6945 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSUtilClient; @@ -480,8 +480,9 @@ void compute() throws IOException { // Before populating the blockChecksum at this index, record the byte // offset where it will begin. blockChecksumPositions[idx] = blockChecksumBuf.getLength(); + ExtendedBlock block = null; try { - ExtendedBlock block = getInternalBlock(numDataUnits, idx); + block = getInternalBlock(numDataUnits, idx); LiveBlockInfo liveBlkInfo = liveDns.get((byte) idx); if (liveBlkInfo == null) { @@ -502,7 +503,9 @@ void compute() throws IOException { break; // done with the computation, simply return. } } catch (IOException e) { - LOG.warn("Failed to get the checksum", e); + LOG.warn("Failed to get the checksum for block {} at index {} " + + "in blockGroup {}", block, idx, blockGroup, e); + throw e; } } @@ -700,24 +703,25 @@ private void recalculateChecksum(int errBlkIndex, long blockLength) blockGroup, ecPolicy, blockIndices, datanodes, errIndices); BlockChecksumType groupChecksumType = getBlockChecksumOptions().getBlockChecksumType(); - final StripedBlockChecksumReconstructor checksumRecon = + try (StripedBlockChecksumReconstructor checksumRecon = groupChecksumType == BlockChecksumType.COMPOSITE_CRC ? 
new StripedBlockChecksumCompositeCrcReconstructor( getDatanode().getErasureCodingWorker(), stripedReconInfo, blockChecksumBuf, blockLength) : new StripedBlockChecksumMd5CrcReconstructor( getDatanode().getErasureCodingWorker(), stripedReconInfo, - blockChecksumBuf, blockLength); - checksumRecon.reconstruct(); - - DataChecksum checksum = checksumRecon.getChecksum(); - long crcPerBlock = checksum.getChecksumSize() <= 0 ? 0 - : checksumRecon.getChecksumDataLen() / checksum.getChecksumSize(); - setOrVerifyChecksumProperties(errBlkIndex, - checksum.getBytesPerChecksum(), crcPerBlock, - checksum.getChecksumType()); - LOG.debug("Recalculated checksum for the block index:{}, checksum={}", - errBlkIndex, checksumRecon.getDigestObject()); + blockChecksumBuf, blockLength)) { + checksumRecon.reconstruct(); + + DataChecksum checksum = checksumRecon.getChecksum(); + long crcPerBlock = checksum.getChecksumSize() <= 0 ? 0 + : checksumRecon.getChecksumDataLen() / checksum.getChecksumSize(); + setOrVerifyChecksumProperties(errBlkIndex, + checksum.getBytesPerChecksum(), crcPerBlock, + checksum.getChecksumType()); + LOG.debug("Recalculated checksum for the block index:{}, checksum={}", + errBlkIndex, checksumRecon.getDigestObject()); + } } private void setOrVerifyChecksumProperties(int blockIdx, int bpc, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index 9a7b6bcf7e52e..390577f8b54bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -29,11 +29,11 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index 539baf114d6fe..5c8e6f48ce70d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -47,9 +47,9 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Manages storage for the set of 
BlockPoolSlices which share a particular diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 2231aeac300a8..cc9551d1d1d3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -64,7 +64,7 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.SYNC_FILE_RANGE_WRITE; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; /** A class that receives a block and writes to its own disk, meanwhile @@ -1368,6 +1368,7 @@ public void close() { */ @Override public void run() { + datanode.metrics.incrDataNodePacketResponderCount(); boolean lastPacketInBlock = false; final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; while (isRunning() && !lastPacketInBlock) { @@ -1505,6 +1506,9 @@ public void run() { } } } + // Any exception will be caught and processed in the previous loop, so we + // will always arrive here when the thread exiting + datanode.metrics.decrDataNodePacketResponderCount(); LOG.info(myString + " terminating"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java index db52d073fe387..d4687e8331adf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -119,6 +119,7 @@ protected void recover() throws IOException { List syncList = new ArrayList<>(locs.length); int errorCount = 0; int candidateReplicaCnt = 0; + DataNodeFaultInjector.get().delay(); // Check generation stamps, replica size and state. 
Replica must satisfy // the following criteria to be included in syncList for recovery: @@ -600,17 +601,22 @@ public Daemon recoverBlocks(final String who, Daemon d = new Daemon(datanode.threadGroup, new Runnable() { @Override public void run() { - for(RecoveringBlock b : blocks) { - try { - logRecoverBlock(who, b); - if (b.isStriped()) { - new RecoveryTaskStriped((RecoveringStripedBlock) b).recover(); - } else { - new RecoveryTaskContiguous(b).recover(); + datanode.metrics.incrDataNodeBlockRecoveryWorkerCount(); + try { + for (RecoveringBlock b : blocks) { + try { + logRecoverBlock(who, b); + if (b.isStriped()) { + new RecoveryTaskStriped((RecoveringStripedBlock) b).recover(); + } else { + new RecoveryTaskContiguous(b).recover(); + } + } catch (IOException e) { + LOG.warn("recover Block: {} FAILED: {}", b, e); } - } catch (IOException e) { - LOG.warn("recoverBlocks FAILED: " + b, e); } + } finally { + datanode.metrics.decrDataNodeBlockRecoveryWorkerCount(); } } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java index 6b1b96fb02655..485cf00152a47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java @@ -18,8 +18,12 @@ package org.apache.hadoop.hdfs.server.datanode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT; @@ -28,11 +32,11 @@ import java.util.TreeMap; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.ScanResultHandler; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference; @@ -66,6 +70,12 @@ public class BlockScanner { */ private Conf conf; + /** + * Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop + * inside {@link #removeAllVolumeScanners}. 
+ */ + private long joinVolumeScannersTimeOutMs; + @VisibleForTesting void setConf(Conf conf) { this.conf = conf; @@ -112,6 +122,7 @@ static class Conf { final long maxStalenessMs; final long scanPeriodMs; final long cursorSaveMs; + final boolean skipRecentAccessed; final Class resultHandler; private static long getUnitTestLong(Configuration conf, String key, @@ -163,6 +174,9 @@ private static long getConfiguredScanPeriodMs(Configuration conf) { this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf, INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT)); + this.skipRecentAccessed = conf.getBoolean( + DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED, + DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED_DEFAULT); if (allowUnitTestSettings) { this.resultHandler = (Class) conf.getClass(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER, @@ -179,6 +193,9 @@ public BlockScanner(DataNode datanode) { public BlockScanner(DataNode datanode, Configuration conf) { this.datanode = datanode; + setJoinVolumeScannersTimeOutMs( + conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY, + DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_DEFAULT)); this.conf = new Conf(conf); if (isEnabled()) { LOG.info("Initialized block scanner with targetBytesPerSec {}", @@ -198,6 +215,13 @@ public boolean isEnabled() { return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0); } + /** + * Returns true if there is any scanner thread registered. + */ + public synchronized boolean hasAnyRegisteredScanner() { + return !scanners.isEmpty(); + } + /** * Set up a scanner for the given block pool and volume. * @@ -262,7 +286,10 @@ public synchronized void removeVolumeScanner(FsVolumeSpi volume) { /** * Stops and removes all volume scanners. * - * This function will block until all the volume scanners have stopped. + * This function is called on shutdown. It will return even if some of + * the scanners don't terminate in time. Since the scanners are daemon + * threads and do not alter the block content, it is safe to ignore + * such conditions on shutdown. 
*/ public synchronized void removeAllVolumeScanners() { for (Entry entry : scanners.entrySet()) { @@ -270,7 +297,7 @@ public synchronized void removeAllVolumeScanners() { } for (Entry entry : scanners.entrySet()) { Uninterruptibles.joinUninterruptibly(entry.getValue(), - 5, TimeUnit.MINUTES); + getJoinVolumeScannersTimeOutMs(), TimeUnit.MILLISECONDS); } scanners.clear(); } @@ -346,6 +373,14 @@ synchronized void markSuspectBlock(String storageId, ExtendedBlock block) { scanner.markSuspectBlock(block); } + public long getJoinVolumeScannersTimeOutMs() { + return joinVolumeScannersTimeOutMs; + } + + public void setJoinVolumeScannersTimeOutMs(long joinScannersTimeOutMs) { + this.joinVolumeScannersTimeOutMs = joinScannersTimeOutMs; + } + @InterfaceAudience.Private public static class Servlet extends HttpServlet { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 6102a592c2661..6ea86505a9f98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -55,8 +55,8 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_SEQUENTIAL; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index d390c1e54232a..4d2fba45735bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -188,7 +188,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.ReadaheadPool; import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; @@ -222,13 +222,13 @@ import org.apache.htrace.core.Tracer; import org.eclipse.jetty.util.ajax.JSON; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import 
org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.slf4j.Logger; @@ -1015,7 +1015,7 @@ private void initIpcServer() throws IOException { // Add all the RPC protocols that the Datanode implements RPC.setProtocolEngine(getConf(), ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ClientDatanodeProtocolServerSideTranslatorPB clientDatanodeProtocolXlator = new ClientDatanodeProtocolServerSideTranslatorPB(this); BlockingService service = ClientDatanodeProtocolService @@ -1661,7 +1661,9 @@ void shutdownBlockPool(BPOfferService bpos) { // a block pool id String bpId = bpos.getBlockPoolId(); - blockScanner.disableBlockPoolId(bpId); + if (blockScanner.hasAnyRegisteredScanner()) { + blockScanner.disableBlockPoolId(bpId); + } if (data != null) { data.shutdownBlockPool(bpId); @@ -2146,6 +2148,8 @@ public void shutdown() { } if (metrics != null) { metrics.setDataNodeActiveXceiversCount(0); + metrics.setDataNodePacketResponderCount(0); + metrics.setDataNodeBlockRecoveryWorkerCount(0); } // IPC server needs to be shutdown late in the process, otherwise @@ -2244,7 +2248,20 @@ private void handleDiskError(String failedVolumes, int failedNumber) { /** Number of concurrent xceivers per node. */ @Override // DataNodeMXBean public int getXceiverCount() { - return threadGroup == null ? 0 : threadGroup.activeCount(); + if (metrics == null) { + return 0; + } + return metrics.getDataNodeActiveXceiverCount(); + } + + @Override // DataNodeMXBean + public int getActiveTransferThreadCount() { + if (metrics == null) { + return 0; + } + return metrics.getDataNodeActiveXceiverCount() + + metrics.getDataNodePacketResponderCount() + + metrics.getDataNodeBlockRecoveryWorkerCount(); } @Override // DataNodeMXBean diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java index c031f6c4c57e9..e4f732cc4e755 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; @@ -96,6 +96,28 @@ public void throwTooManyOpenFiles() throws FileNotFoundException { */ public void stripedBlockReconstruction() throws IOException {} + /** + * Used as a hook to inject failure in erasure coding checksum reconstruction + * process. + */ + public void stripedBlockChecksumReconstruction() throws IOException {} + + /** + * Used as a hook to inject latency when read block + * in erasure coding reconstruction process. + */ + public void delayBlockReader() {} + + /** + * Used as a hook to inject intercept when free the block reader buffer. + */ + public void interceptFreeBlockReaderBuffer() {} + + /** + * Used as a hook to inject intercept When finish reading from block. + */ + public void interceptBlockReader() {} + /** * Used as a hook to inject intercept when BPOfferService hold lock. */ @@ -105,4 +127,9 @@ public void delayWhenOfferServiceHoldLock() {} * Used as a hook to inject intercept when re-register. 
*/ public void blockUtilSendFullBlockReport() {} + + /** + * Just delay a while. + */ + public void delay() {} } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java index 9d11e1488479b..7a8f59bb667a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java @@ -104,11 +104,15 @@ public interface DataNodeMXBean { public String getClusterId(); /** - * Returns an estimate of the number of Datanode threads - * actively transferring blocks. + * Returns the number of active xceivers. */ public int getXceiverCount(); + /** + * Returns the number of Datanode threads actively transferring blocks. + */ + int getActiveTransferThreadCount(); + /** * Returns an estimate of the number of data replication/reconstruction tasks * running currently. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 2447fd7137236..e917b77ae61ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -62,10 +62,10 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.util.Daemon; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ComparisonChain; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Data storage information file. 
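The DataNodeFaultInjector hunks above add hooks (delay(), delayBlockReader(), interceptBlockReader(), and similar) that are no-ops in production and are overridden by tests to inject latency or callbacks at specific points. A minimal sketch of that swappable-singleton pattern follows, under the assumption of a hypothetical ExampleFaultInjector class that is not part of this patch:

    // Minimal sketch of the fault-injector pattern: hooks are no-ops in
    // production, and tests replace the singleton with a subclass that
    // injects delays or callbacks. Illustrative only, not Hadoop code.
    public class ExampleFaultInjector {
      private static ExampleFaultInjector instance = new ExampleFaultInjector();

      public static ExampleFaultInjector get() {
        return instance;
      }

      // Tests call this to install an overriding instance.
      public static void set(ExampleFaultInjector injector) {
        instance = injector;
      }

      // No-op in production; a test override might sleep here to widen a race window.
      public void delay() {
      }

      public static void main(String[] args) {
        // Production path: the hook does nothing.
        ExampleFaultInjector.get().delay();

        // Test path: install an override that injects 100 ms of latency.
        ExampleFaultInjector.set(new ExampleFaultInjector() {
          @Override
          public void delay() {
            try {
              Thread.sleep(100L);
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          }
        });
        ExampleFaultInjector.get().delay();
      }
    }

Because production code only ever calls the no-op methods, the hooks add negligible overhead while letting tests reproduce timing-sensitive conditions such as the lock-hold delays exercised in BPOfferService.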
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 9c885fc4ab255..7df5caf60a0ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.ByteString; import javax.crypto.SecretKey; import org.apache.commons.logging.Log; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index ea85a476a429a..4a017ed86b30c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.util.Daemon; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; @@ -188,6 +188,9 @@ void release() { this.maxXceiverCount = conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT); + Preconditions.checkArgument(this.maxXceiverCount >= 1, + DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY + + " should not be less than 1."); this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 35625ce121d94..be7e47a73e313 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -46,15 +46,14 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo; -import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.StopWatch; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import 
org.apache.hadoop.thirdparty.com.google.common.collect.ListMultimap; /** * Periodically scans the data directories for block and block metadata files. @@ -472,88 +471,84 @@ private void scan() { // Pre-sort the reports outside of the lock blockPoolReport.sortBlocks(); - // Hold FSDataset lock to prevent further changes to the block map - try (AutoCloseableLock lock = dataset.acquireDatasetLock()) { - for (final String bpid : blockPoolReport.getBlockPoolIds()) { - List blockpoolReport = blockPoolReport.getScanInfo(bpid); - - Stats statsRecord = new Stats(bpid); - stats.put(bpid, statsRecord); - Collection diffRecord = new ArrayList<>(); - - statsRecord.totalBlocks = blockpoolReport.size(); - final List bl = dataset.getFinalizedBlocks(bpid); - Collections.sort(bl); // Sort based on blockId - - int d = 0; // index for blockpoolReport - int m = 0; // index for memReprot - while (m < bl.size() && d < blockpoolReport.size()) { - ReplicaInfo memBlock = bl.get(m); - ScanInfo info = blockpoolReport.get(d); - if (info.getBlockId() < memBlock.getBlockId()) { - if (!dataset.isDeletingBlock(bpid, info.getBlockId())) { - // Block is missing in memory - statsRecord.missingMemoryBlocks++; - addDifference(diffRecord, statsRecord, info); - } - d++; - continue; - } - if (info.getBlockId() > memBlock.getBlockId()) { - // Block is missing on the disk - addDifference(diffRecord, statsRecord, memBlock.getBlockId(), - info.getVolume()); - m++; - continue; - } - // Block file and/or metadata file exists on the disk - // Block exists in memory - if (info.getVolume().getStorageType() != StorageType.PROVIDED - && info.getBlockFile() == null) { - // Block metadata file exits and block file is missing - addDifference(diffRecord, statsRecord, info); - } else if (info.getGenStamp() != memBlock.getGenerationStamp() - || info.getBlockLength() != memBlock.getNumBytes()) { - // Block metadata file is missing or has wrong generation stamp, - // or block file length is different than expected - statsRecord.mismatchBlocks++; - addDifference(diffRecord, statsRecord, info); - } else if (memBlock.compareWith(info) != 0) { - // volumeMap record and on-disk files do not match. - statsRecord.duplicateBlocks++; + for (final String bpid : blockPoolReport.getBlockPoolIds()) { + List blockpoolReport = blockPoolReport.getScanInfo(bpid); + + Stats statsRecord = new Stats(bpid); + stats.put(bpid, statsRecord); + Collection diffRecord = new ArrayList<>(); + + statsRecord.totalBlocks = blockpoolReport.size(); + final List bl; + bl = dataset.getSortedFinalizedBlocks(bpid); + + int d = 0; // index for blockpoolReport + int m = 0; // index for memReprot + while (m < bl.size() && d < blockpoolReport.size()) { + ReplicaInfo memBlock = bl.get(m); + ScanInfo info = blockpoolReport.get(d); + if (info.getBlockId() < memBlock.getBlockId()) { + if (!dataset.isDeletingBlock(bpid, info.getBlockId())) { + // Block is missing in memory + statsRecord.missingMemoryBlocks++; addDifference(diffRecord, statsRecord, info); } d++; - - if (d < blockpoolReport.size()) { - // There may be multiple on-disk records for the same block, do not - // increment the memory record pointer if so. 
- ScanInfo nextInfo = blockpoolReport.get(d); - if (nextInfo.getBlockId() != info.getBlockId()) { - ++m; - } - } else { - ++m; - } + continue; } - while (m < bl.size()) { - ReplicaInfo current = bl.get(m++); - addDifference(diffRecord, statsRecord, current.getBlockId(), - current.getVolume()); + if (info.getBlockId() > memBlock.getBlockId()) { + // Block is missing on the disk + addDifference(diffRecord, statsRecord, memBlock.getBlockId(), + info.getVolume()); + m++; + continue; } - while (d < blockpoolReport.size()) { - if (!dataset.isDeletingBlock(bpid, - blockpoolReport.get(d).getBlockId())) { - statsRecord.missingMemoryBlocks++; - addDifference(diffRecord, statsRecord, blockpoolReport.get(d)); + // Block file and/or metadata file exists on the disk + // Block exists in memory + if (info.getBlockFile() == null) { + // Block metadata file exits and block file is missing + addDifference(diffRecord, statsRecord, info); + } else if (info.getGenStamp() != memBlock.getGenerationStamp() + || info.getBlockLength() != memBlock.getNumBytes()) { + // Block metadata file is missing or has wrong generation stamp, + // or block file length is different than expected + statsRecord.mismatchBlocks++; + addDifference(diffRecord, statsRecord, info); + } else if (memBlock.compareWith(info) != 0) { + // volumeMap record and on-disk files do not match. + statsRecord.duplicateBlocks++; + addDifference(diffRecord, statsRecord, info); + } + d++; + + if (d < blockpoolReport.size()) { + // There may be multiple on-disk records for the same block, do not + // increment the memory record pointer if so. + ScanInfo nextInfo = blockpoolReport.get(d); + if (nextInfo.getBlockId() != info.getBlockId()) { + ++m; } - d++; + } else { + ++m; } - synchronized (diffs) { - diffs.addAll(bpid, diffRecord); + } + while (m < bl.size()) { + ReplicaInfo current = bl.get(m++); + addDifference(diffRecord, statsRecord, current.getBlockId(), + current.getVolume()); + } + while (d < blockpoolReport.size()) { + if (!dataset.isDeletingBlock(bpid, + blockpoolReport.get(d).getBlockId())) { + statsRecord.missingMemoryBlocks++; + addDifference(diffRecord, statsRecord, blockpoolReport.get(d)); } - LOG.info("Scan Results: {}", statsRecord); + d++; + } + synchronized (diffs) { + diffs.addAll(bpid, diffRecord); } + LOG.info("Scan Results: {}", statsRecord); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java index ffa8524a2f967..1c57e38615ec9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java @@ -18,8 +18,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -902,7 +902,7 @@ private long getMaxError(DiskBalancerWorkItem item) { */ private ExtendedBlock getBlockToCopy(FsVolumeSpi.BlockIterator iter, DiskBalancerWorkItem item) { - while (!iter.atEnd() && item.getErrorCount() < 
getMaxError(item)) { + while (!iter.atEnd() && item.getErrorCount() <= getMaxError(item)) { try { ExtendedBlock block = iter.nextBlock(); if(null == block){ @@ -923,7 +923,7 @@ private ExtendedBlock getBlockToCopy(FsVolumeSpi.BlockIterator iter, item.incErrorCount(); } } - if (item.getErrorCount() >= getMaxError(item)) { + if (item.getErrorCount() > getMaxError(item)) { item.setErrMsg("Error count exceeded."); LOG.info("Maximum error count exceeded. Error count: {} Max error:{} ", item.getErrorCount(), item.getMaxDiskErrors()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java index fc98d3a6b7a8e..cf6902912f6ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java @@ -280,7 +280,12 @@ public void transferToSocketFully( profilingEventHook.afterFileIo(volume, TRANSFER, begin, count); } catch (Exception e) { String em = e.getMessage(); - if (!em.startsWith("Broken pipe") && !em.startsWith("Connection reset")) { + if (em != null) { + if (!em.startsWith("Broken pipe") + && !em.startsWith("Connection reset")) { + onFailure(volume, begin); + } + } else { onFailure(volume, begin); } throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java index 73cc44b31f9cf..f55b8c2b73425 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java @@ -37,8 +37,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Manage Incremental Block Reports (IBRs). 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java index cb53a64f7defe..b711e1a8f2115 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java @@ -42,7 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is used for all replicas which are on local storage media diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java index bd23021f5cdef..00640f62c8428 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProvidedReplica.java @@ -23,7 +23,7 @@ import java.io.OutputStream; import java.net.URI; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.input.BoundedInputStream; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index 84fac578d9798..6c666411c9671 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.daemon.Daemon; import org.apache.commons.daemon.DaemonContext; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java index 3df83cfbad4ae..fe94de46698c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java @@ -30,7 +30,7 @@ import java.util.Iterator; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,9 +46,9 @@ import org.apache.hadoop.net.unix.DomainSocketWatcher; import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; 
-import com.google.common.collect.HashMultimap; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; /** * Manages client short-circuit memory segments on the DataNode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index 84cfb04801d88..6bc25eb24a675 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -19,19 +19,23 @@ package org.apache.hadoop.hdfs.server.datanode; import java.io.DataOutputStream; +import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference; @@ -479,6 +483,50 @@ static boolean calculateShouldScan(String storageId, long targetBytesPerSec, return shouldScan; } + /** + * Get next block and check if it's needed to scan. + * + * @return the candidate block. + */ + ExtendedBlock getNextBlockToScan() { + ExtendedBlock block; + try { + block = curBlockIter.nextBlock(); + } catch (IOException e) { + // There was an error listing the next block in the volume. This is a + // serious issue. + LOG.warn("{}: nextBlock error on {}", this, curBlockIter); + // On the next loop iteration, curBlockIter#eof will be set to true, and + // we will pick a different block iterator. + return null; + } + if (block == null) { + // The BlockIterator is at EOF. + LOG.info("{}: finished scanning block pool {}", + this, curBlockIter.getBlockPoolId()); + saveBlockIterator(curBlockIter); + return null; + } else if (conf.skipRecentAccessed) { + // Check the access time of block file to avoid scanning recently + // changed blocks, reducing disk IO. + try { + BlockLocalPathInfo blockLocalPathInfo = + volume.getDataset().getBlockLocalPathInfo(block); + BasicFileAttributes attr = Files.readAttributes( + new File(blockLocalPathInfo.getBlockPath()).toPath(), + BasicFileAttributes.class); + if (System.currentTimeMillis() - attr.lastAccessTime(). 
+ to(TimeUnit.MILLISECONDS) < conf.scanPeriodMs) { + return null; + } + } catch (IOException ioe) { + LOG.debug("Failed to get access time of block {}", + block, ioe); + } + } + return block; + } + /** * Run an iteration of the VolumeScanner loop. * @@ -503,10 +551,10 @@ private long runLoop(ExtendedBlock suspectBlock) { return 30000L; } - // Find a usable block pool to scan. if (suspectBlock != null) { block = suspectBlock; } else { + // Find a usable block pool to scan. if ((curBlockIter == null) || curBlockIter.atEnd()) { long timeout = findNextUsableBlockIter(); if (timeout > 0) { @@ -524,22 +572,9 @@ private long runLoop(ExtendedBlock suspectBlock) { } return 0L; } - try { - block = curBlockIter.nextBlock(); - } catch (IOException e) { - // There was an error listing the next block in the volume. This is a - // serious issue. - LOG.warn("{}: nextBlock error on {}", this, curBlockIter); - // On the next loop iteration, curBlockIter#eof will be set to true, and - // we will pick a different block iterator. - return 0L; - } + block = getNextBlockToScan(); if (block == null) { - // The BlockIterator is at EOF. - LOG.info("{}: finished scanning block pool {}", - this, curBlockIter.getBlockPoolId()); - saveBlockIterator(curBlockIter); - return 0; + return 0L; } } if (curBlockIter != null) { @@ -635,12 +670,14 @@ public void run() { LOG.error("{} exiting because of exception ", this, e); } LOG.info("{} exiting.", this); + VolumeScannerCBInjector.get().preSavingBlockIteratorTask(this); // Save the current position of all block iterators and close them. for (BlockIterator iter : blockIters) { saveBlockIterator(iter); IOUtils.cleanup(null, iter); } } finally { + VolumeScannerCBInjector.get().terminationCallBack(this); // When the VolumeScanner exits, release the reference we were holding // on the volume. This will allow the volume to be removed later. IOUtils.cleanup(null, ref); @@ -660,6 +697,7 @@ public synchronized void shutdown() { stopping = true; notify(); this.interrupt(); + VolumeScannerCBInjector.get().shutdownCallBack(this); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScannerCBInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScannerCBInjector.java new file mode 100644 index 0000000000000..d15d8d45d5e60 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScannerCBInjector.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.datanode; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Used for injecting call backs in {@link VolumeScanner} + * and {@link BlockScanner} tests. + * Calls into this are a no-op in production code. + */ +@VisibleForTesting +@InterfaceAudience.Private +public class VolumeScannerCBInjector { + private static VolumeScannerCBInjector instance = + new VolumeScannerCBInjector(); + + public static VolumeScannerCBInjector get() { + return instance; + } + + public static void set(VolumeScannerCBInjector injector) { + instance = injector; + } + + public void preSavingBlockIteratorTask(final VolumeScanner volumeScanner) { + } + + public void shutdownCallBack(final VolumeScanner volumeScanner) { + } + + public void terminationCallBack(final VolumeScanner volumeScanner) { + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java index 06867fbceb793..6ae4f01299ceb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java @@ -20,15 +20,15 @@ */ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.annotations.Beta; +import org.apache.hadoop.thirdparty.com.google.common.annotations.GwtCompatible; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater .newUpdater; @@ -55,7 +55,7 @@ * include instantiating a {@link SettableFuture}, submitting a task to a * {@link ListeningExecutorService}, and deriving a {@code Future} from an * existing one, typically using methods like {@link Futures#transform - * (ListenableFuture, com.google.common.base.Function) Futures.transform} + * (ListenableFuture, org.apache.hadoop.thirdparty.com.google.common.base.Function) Futures.transform} * and its overloaded versions. *
<p>
 * <p>
    This class implements all methods in {@code ListenableFuture}. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java index d69845448b7aa..75b0ebea389cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker; import java.util.Optional; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index 91582fe0558a8..d077d215a5750 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -18,14 +18,14 @@ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; @@ -354,23 +354,29 @@ private class ResultHandler } @Override - public void onSuccess(@Nonnull VolumeCheckResult result) { - switch(result) { - case HEALTHY: - case DEGRADED: - LOG.debug("Volume {} is {}.", reference.getVolume(), result); - markHealthy(); - break; - case FAILED: - LOG.warn("Volume {} detected as being unhealthy", + public void onSuccess(VolumeCheckResult result) { + if (result == null) { + LOG.error("Unexpected health check result null for volume {}", reference.getVolume()); - markFailed(); - break; - default: - LOG.error("Unexpected health check result {} for volume {}", - result, reference.getVolume()); markHealthy(); - break; + } else { + switch(result) { + case HEALTHY: + case DEGRADED: + LOG.debug("Volume {} is {}.", reference.getVolume(), result); + 
markHealthy(); + break; + case FAILED: + LOG.warn("Volume {} detected as being unhealthy", + reference.getVolume()); + markFailed(); + break; + default: + LOG.error("Unexpected health check result {} for volume {}", + result, reference.getVolume()); + markHealthy(); + break; + } } cleanup(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java index 0332bc8633950..3d49dd5362864 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java @@ -20,9 +20,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java index 032379a4d12a1..f969c7ade288b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Timer; @@ -117,8 +117,8 @@ public ThrottledAsyncChecker(final Timer timer, * will receive the same Future. 
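The ThrottledAsyncChecker change above makes the scheduling method synchronized and drops the nullability annotation on the callback. As a loose illustration of why the scheduling path wants a lock, here is a minimal, self-contained sketch; it uses the JDK's CompletableFuture instead of the shaded Guava ListenableFuture, and the class and field names are illustrative rather than the Hadoop implementation.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

// Minimal sketch of a throttled async checker: at most one in-flight check
// per target key. The scheduling method is synchronized so two threads
// cannot both miss the "already in progress" test and submit duplicate work.
public class ThrottledCheckerSketch<K, V> {
  private final ExecutorService executor = Executors.newCachedThreadPool();
  private final Map<K, CompletableFuture<V>> checksInProgress = new HashMap<>();

  public synchronized Optional<CompletableFuture<V>> schedule(
      K key, Supplier<V> check) {
    if (checksInProgress.containsKey(key)) {
      // A check for this key is already running; no new work is submitted.
      return Optional.empty();
    }
    CompletableFuture<V> future = CompletableFuture.supplyAsync(check, executor);
    checksInProgress.put(key, future);
    // The cleanup callback takes the same lock, mirroring how the real class
    // synchronizes its result-caching callback on the checker instance.
    future.whenComplete((result, error) -> {
      synchronized (this) {
        checksInProgress.remove(key);
      }
    });
    return Optional.of(future);
  }
}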
*/ @Override - public Optional> schedule(Checkable target, - K context) { + public synchronized Optional> schedule( + Checkable target, K context) { if (checksInProgress.containsKey(target)) { return Optional.empty(); } @@ -166,7 +166,7 @@ private void addResultCachingCallback( Checkable target, ListenableFuture lf) { Futures.addCallback(lf, new FutureCallback() { @Override - public void onSuccess(@Nullable V result) { + public void onSuccess(V result) { synchronized (ThrottledAsyncChecker.this) { checksInProgress.remove(target); completedChecks.put(target, new LastCheckResult<>( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java index ae7b34f773468..d014e499f912e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java @@ -20,8 +20,8 @@ */ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index f9063b7a8929f..9fb5d9e708ca2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -106,7 +106,7 @@ public void rejectedExecution(Runnable runnable, private void initializeStripedBlkReconstructionThreadPool(int numThreads) { LOG.debug("Using striped block reconstruction; pool threads={}", numThreads); - stripedReconstructionPool = DFSUtilClient.getThreadPoolExecutor(2, + stripedReconstructionPool = DFSUtilClient.getThreadPoolExecutor(numThreads, numThreads, 60, new LinkedBlockingQueue<>(), "StripedBlockReconstruction-", false); stripedReconstructionPool.allowCoreThreadTimeOut(true); @@ -121,7 +121,6 @@ private void initializeStripedBlkReconstructionThreadPool(int numThreads) { public void processErasureCodingTasks( Collection ecTasks) { for (BlockECReconstructionInfo reconInfo : ecTasks) { - int xmitsSubmitted = 0; try { StripedReconstructionInfo stripedReconInfo = new StripedReconstructionInfo( @@ -134,20 +133,19 @@ public void processErasureCodingTasks( final StripedBlockReconstructor task = new StripedBlockReconstructor(this, stripedReconInfo); if (task.hasValidTargets()) { + stripedReconstructionPool.submit(task); // See 
HDFS-12044. We increase xmitsInProgress even the task is only // enqueued, so that // 1) NN will not send more tasks than what DN can execute and // 2) DN will not throw away reconstruction tasks, and instead keeps // an unbounded number of tasks in the executor's task queue. - xmitsSubmitted = Math.max((int)(task.getXmits() * xmitWeight), 1); + int xmitsSubmitted = Math.max((int)(task.getXmits() * xmitWeight), 1); getDatanode().incrementXmitsInProcess(xmitsSubmitted); - stripedReconstructionPool.submit(task); } else { LOG.warn("No missing internal block. Skip reconstruction for task:{}", reconInfo); } } catch (Throwable e) { - getDatanode().decrementXmitsInProgress(xmitsSubmitted); LOG.warn("Failed to reconstruct striped block {}", reconInfo.getExtendedBlock().getLocalBlock(), e); } @@ -170,4 +168,8 @@ public void shutDown() { stripedReconstructionPool.shutdown(); stripedReadPool.shutdown(); } + + public float getXmitWeight() { + return xmitWeight; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java index b2e64966a18b0..e28d6c556b803 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockChecksumReconstructor.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; +import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.io.DataOutputBuffer; /** @@ -32,7 +34,7 @@ */ @InterfaceAudience.Private public abstract class StripedBlockChecksumReconstructor - extends StripedReconstructor { + extends StripedReconstructor implements Closeable { private ByteBuffer targetBuffer; private final byte[] targetIndices; @@ -73,31 +75,28 @@ private void init() throws IOException { public void reconstruct() throws IOException { prepareDigester(); long maxTargetLength = getMaxTargetLength(); - try { - while (requestedLen > 0 && getPositionInBlock() < maxTargetLength) { - long remaining = maxTargetLength - getPositionInBlock(); - final int toReconstructLen = (int) Math - .min(getStripedReader().getBufferSize(), remaining); - // step1: read from minimum source DNs required for reconstruction. - // The returned success list is the source DNs we do real read from - getStripedReader().readMinimumSources(toReconstructLen); - - // step2: decode to reconstruct targets - reconstructTargets(toReconstructLen); - - // step3: calculate checksum - checksumDataLen += checksumWithTargetOutput( - targetBuffer.array(), toReconstructLen); - - updatePositionInBlock(toReconstructLen); - requestedLen -= toReconstructLen; - clearBuffers(); - } - - commitDigest(); - } finally { - cleanup(); + while (requestedLen > 0 && getPositionInBlock() < maxTargetLength) { + DataNodeFaultInjector.get().stripedBlockChecksumReconstruction(); + long remaining = maxTargetLength - getPositionInBlock(); + final int toReconstructLen = (int) Math + .min(getStripedReader().getBufferSize(), remaining); + // step1: read from minimum source DNs required for reconstruction. 
+ // The returned success list is the source DNs we do real read from + getStripedReader().readMinimumSources(toReconstructLen); + + // step2: decode to reconstruct targets + reconstructTargets(toReconstructLen); + + // step3: calculate checksum + checksumDataLen += checksumWithTargetOutput( + getBufferArray(targetBuffer), toReconstructLen); + + updatePositionInBlock(toReconstructLen); + requestedLen -= toReconstructLen; + clearBuffers(); } + + commitDigest(); } /** @@ -140,7 +139,7 @@ private long checksumWithTargetOutput(byte[] outputData, int toReconstructLen) // case-2) length of data bytes which is less than bytesPerCRC if (requestedLen <= toReconstructLen) { int remainingLen = Math.toIntExact(requestedLen); - outputData = Arrays.copyOf(targetBuffer.array(), remainingLen); + outputData = Arrays.copyOf(outputData, remainingLen); int partialLength = remainingLen % getChecksum().getBytesPerChecksum(); @@ -207,4 +206,25 @@ private void clearBuffers() { public long getChecksumDataLen() { return checksumDataLen; } + + /** + * Gets an array corresponding the buffer. + * @param buffer the input buffer. + * @return the array with content of the buffer. + */ + private static byte[] getBufferArray(ByteBuffer buffer) { + byte[] buff = new byte[buffer.remaining()]; + if (buffer.hasArray()) { + buff = buffer.array(); + } else { + buffer.slice().get(buff); + } + return buff; + } + + @Override + public void close() throws IOException { + getStripedReader().close(); + cleanup(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java index 4dc51c9916ff7..b1ad03f28dbbf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.hdfs.util.StripedBlockUtil.BlockReadStats; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; @@ -95,6 +96,7 @@ ByteBuffer getReadBuffer() { } void freeReadBuffer() { + DataNodeFaultInjector.get().interceptFreeBlockReaderBuffer(); buffer = null; } @@ -179,6 +181,8 @@ public BlockReadStats call() throws Exception { } catch (IOException e) { LOG.info(e.getMessage()); throw e; + } finally { + DataNodeFaultInjector.get().interceptBlockReader(); } } }; @@ -188,6 +192,7 @@ public BlockReadStats call() throws Exception { * Perform actual reading of bytes from block. 
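The checksum reconstructor previously read targetBuffer.array(), which only works for heap buffers; the new getBufferArray helper also copes with direct buffers, which have no accessible backing array. A small stand-alone sketch of that heap-versus-direct distinction, assuming nothing beyond the JDK (class and method names here are illustrative):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Heap-versus-direct sketch: ByteBuffer.array() only works for heap buffers
// with an accessible backing array; a direct buffer throws
// UnsupportedOperationException instead. Copying through slice() handles
// both kinds without moving the original buffer's position.
public final class BufferToArraySketch {
  private BufferToArraySketch() { }

  static byte[] toArray(ByteBuffer buffer) {
    if (buffer.hasArray()) {
      // Heap buffer: hand back the backing array (mirrors the simple case
      // where position and arrayOffset are both zero).
      return buffer.array();
    }
    byte[] bytes = new byte[buffer.remaining()];
    buffer.slice().get(bytes);  // direct buffer: copy the remaining bytes
    return bytes;
  }

  public static void main(String[] args) {
    ByteBuffer heap = ByteBuffer.wrap("heap".getBytes(StandardCharsets.UTF_8));
    ByteBuffer direct = ByteBuffer.allocateDirect(6);
    direct.put("direct".getBytes(StandardCharsets.UTF_8));
    direct.flip();
    System.out.println(new String(toArray(heap), StandardCharsets.UTF_8));
    System.out.println(new String(toArray(direct), StandardCharsets.UTF_8));
  }
}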
*/ private BlockReadStats actualReadFromBlock() throws IOException { + DataNodeFaultInjector.get().delayBlockReader(); int len = buffer.remaining(); int n = 0; while (n < len) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java index 29c0078e95710..1af2380886ac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java @@ -67,7 +67,11 @@ public void run() { LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e); getDatanode().getMetrics().incrECFailedReconstructionTasks(); } finally { - getDatanode().decrementXmitsInProgress(getXmits()); + float xmitWeight = getErasureCodingWorker().getXmitWeight(); + // if the xmits is smaller than 1, the xmitsSubmitted should be set to 1 + // because if it set to zero, we cannot to measure the xmits submitted + int xmitsSubmitted = Math.max((int) (getXmits() * xmitWeight), 1); + getDatanode().decrementXmitsInProgress(xmitsSubmitted); final DataNodeMetrics metrics = getDatanode().getMetrics(); metrics.incrECReconstructionTasks(); metrics.incrECReconstructionBytesRead(getBytesRead()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java index 98edf724a8e25..b1992ea1ac165 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -328,14 +329,14 @@ int[] doReadMinimumSources(int reconstructLength, // cancel remaining reads if we read successfully from minimum // number of source DNs required by reconstruction. cancelReads(futures.keySet()); - futures.clear(); + clearFuturesAndService(); break; } } } catch (InterruptedException e) { LOG.info("Read data interrupted.", e); cancelReads(futures.keySet()); - futures.clear(); + clearFuturesAndService(); break; } } @@ -429,6 +430,20 @@ private static void cancelReads(Collection> futures) { } } + // remove all stale futures from readService, and clear futures. 
+ private void clearFuturesAndService() { + while (!futures.isEmpty()) { + try { + Future future = readService.poll( + stripedReadTimeoutInMills, TimeUnit.MILLISECONDS + ); + futures.remove(future); + } catch (InterruptedException e) { + LOG.info("Clear stale futures from service is interrupted.", e); + } + } + } + void close() { if (zeroStripeBuffers != null) { for (ByteBuffer zeroStripeBuffer : zeroStripeBuffers) { @@ -438,9 +453,9 @@ void close() { zeroStripeBuffers = null; for (StripedBlockReader reader : readers) { + reader.closeBlockReader(); reconstructor.freeBuffer(reader.getReadBuffer()); reader.freeReadBuffer(); - reader.closeBlockReader(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java index a1f4c7ff55e37..851f695662082 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -275,4 +276,13 @@ Configuration getConf() { DataNode getDatanode() { return datanode; } + + public ErasureCodingWorker getErasureCodingWorker() { + return erasureCodingWorker; + } + + @VisibleForTesting + static ByteBufferPool getBufferPool() { + return BUFFER_POOL; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java index 762506cfdaefb..683b63fa149af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; @@ -296,7 +296,8 @@ boolean hasValidTargets() { */ void clearBuffers() { for (StripedBlockWriter writer : writers) { - ByteBuffer targetBuffer = writer.getTargetBuffer(); + ByteBuffer targetBuffer = + writer != null ? writer.getTargetBuffer() : null; if (targetBuffer != null) { targetBuffer.clear(); } @@ -305,7 +306,8 @@ void clearBuffers() { void close() { for (StripedBlockWriter writer : writers) { - ByteBuffer targetBuffer = writer.getTargetBuffer(); + ByteBuffer targetBuffer = + writer != null ? 
writer.getTargetBuffer() : null; if (targetBuffer != null) { reconstructor.freeBuffer(targetBuffer); writer.freeTargetBuffer(); @@ -313,7 +315,9 @@ void close() { } for (int i = 0; i < targets.length; i++) { - writers[i].close(); + if (writers[i] != null) { + writers[i].close(); + } } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 2e5135d841f4b..854953a2ba820 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -237,16 +237,17 @@ StorageReport[] getStorageReports(String bpid) VolumeFailureSummary getVolumeFailureSummary(); /** - * Gets a list of references to the finalized blocks for the given block pool. + * Gets a sorted list of references to the finalized blocks for the given + * block pool. The list is sorted by blockID. *

   * Callers of this function should call
   * {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being
   * changed during list iteration.
   *

    * @return a list of references to the finalized blocks for the given block - * pool. + * pool. The list is sorted by blockID. */ - List getFinalizedBlocks(String bpid); + List getSortedFinalizedBlocks(String bpid); /** * Check whether the in-memory block record matches the block on the disk, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index fc84c4d280cde..20df0e986b718 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -76,7 +76,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.Timer; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A block pool slice represents a portion of a block pool stored on a volume. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java index f79b7c7374c6e..476a31e2ab132 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/CacheStats.java @@ -22,7 +22,7 @@ import org.apache.hadoop.io.nativeio.NativeIO; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Keeps statistics for the memory cache. 
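The getFinalizedBlocks to getSortedFinalizedBlocks rename promises callers a snapshot ordered by block ID. As a loose illustration of what such a contract gives a consumer (for example, merging against other ID-ordered data or binary searching), here is a self-contained sketch; the Replica type and the map are hypothetical stand-ins, not HDFS classes.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sorted-finalized-blocks sketch: only finalized replicas are reported, and
// the snapshot is sorted by block ID, which is the ordering guarantee the
// renamed method documents.
public class SortedFinalizedBlocksSketch {
  static final class Replica {
    final long blockId;
    final boolean finalized;
    Replica(long blockId, boolean finalized) {
      this.blockId = blockId;
      this.finalized = finalized;
    }
  }

  private final Map<Long, Replica> replicaMap = new ConcurrentHashMap<>();

  void add(Replica r) {
    replicaMap.put(r.blockId, r);
  }

  List<Replica> getSortedFinalizedBlocks() {
    List<Replica> finalized = new ArrayList<>();
    for (Replica r : replicaMap.values()) {
      if (r.finalized) {
        finalized.add(r);
      }
    }
    finalized.sort(Comparator.comparingLong((Replica r) -> r.blockId));
    return finalized;
  }
}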
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index 732dc2180eabf..b6a57fdeffa77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -23,8 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.FileInputStream; import java.io.FileNotFoundException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 39152fccb8f26..4526ecac840d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -48,7 +48,7 @@ import javax.management.ObjectName; import javax.management.StandardMBean; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -120,9 +120,9 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Timer; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -575,7 +575,8 @@ public void removeVolumes( // Unlike updating the volumeMap in addVolume(), this operation does // not scan disks. for (String bpid : volumeMap.getBlockPoolList()) { - List blocks = new ArrayList<>(); + List blocks = blkToInvalidate + .computeIfAbsent(bpid, (k) -> new ArrayList<>()); for (Iterator it = volumeMap.replicas(bpid).iterator(); it.hasNext();) { ReplicaInfo block = it.next(); @@ -588,9 +589,7 @@ public void removeVolumes( it.remove(); } } - blkToInvalidate.put(bpid, blocks); } - storageToRemove.add(sd.getStorageUuid()); storageLocationsToRemove.remove(sdLocation); } @@ -1902,28 +1901,32 @@ public Map getBlockReports(String bpid) { continue; } String volStorageID = b.getVolume().getStorageID(); - if (!builders.containsKey(volStorageID)) { - if (!missingVolumesReported.contains(volStorageID)) { - LOG.warn("Storage volume: " + volStorageID + " missing for the" - + " replica block: " + b + ". 
Probably being removed!"); - missingVolumesReported.add(volStorageID); - } - continue; - } switch(b.getState()) { case FINALIZED: case RBW: case RWR: - builders.get(volStorageID).add(b); break; case RUR: - ReplicaInfo orig = b.getOriginalReplica(); - builders.get(volStorageID).add(orig); + // use the original replica. + b = b.getOriginalReplica(); break; case TEMPORARY: - break; + continue; default: assert false : "Illegal ReplicaInfo state."; + continue; + } + BlockListAsLongs.Builder storageBuilder = builders.get(volStorageID); + // a storage in the process of failing will not be in the volumes list + // but will be in the replica map. + if (storageBuilder != null) { + storageBuilder.add(b); + } else { + if (!missingVolumesReported.contains(volStorageID)) { + LOG.warn("Storage volume: " + volStorageID + " missing for the" + + " replica block: " + b + ". Probably being removed!"); + missingVolumesReported.add(volStorageID); + } } } } @@ -1937,17 +1940,18 @@ public Map getBlockReports(String bpid) { } /** - * Gets a list of references to the finalized blocks for the given block pool. + * Gets a list of references to the finalized blocks for the given block pool, + * sorted by blockID. *

   * Callers of this function should call
   * {@link FsDatasetSpi#acquireDatasetLock()} to avoid blocks' status being
   * changed during list iteration.
   *

    * @return a list of references to the finalized blocks for the given block - * pool. + * pool. The list is sorted by blockID. */ @Override - public List getFinalizedBlocks(String bpid) { + public List getSortedFinalizedBlocks(String bpid) { try (AutoCloseableLock lock = datasetWriteLock.acquire()) { final List finalized = new ArrayList( volumeMap.size(bpid)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java index fbd02c76820cd..b5d2b95e219cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java @@ -34,7 +34,7 @@ import java.nio.file.Paths; import java.util.Arrays; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index aa0cc5685fd81..d644e05038b85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -86,10 +86,10 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * The underlying volume used to store replica. @@ -133,6 +133,7 @@ public class FsVolumeImpl implements FsVolumeSpi { protected volatile long configuredCapacity; private final FileIoProvider fileIoProvider; private final DataNodeVolumeMetrics metrics; + private URI baseURI; /** * Per-volume worker pool that processes new blocks to cache. 
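The FsVolumeImpl hunk that follows derives the volume's base URI once in the constructor and returns the cached value from getBaseURI(), instead of building a new File and URI on every call. A minimal sketch of that compute-once pattern (class name and path are illustrative):

import java.io.File;
import java.net.URI;

// Compute-once sketch: the directory a volume points at is fixed for the
// volume's lifetime, so the base URI can be derived in the constructor and
// simply returned afterwards, avoiding per-call File and URI allocation.
public class VolumeBaseUriSketch {
  private final URI baseURI;

  public VolumeBaseUriSketch(File currentDir) {
    // Same derivation as before, just performed a single time.
    this.baseURI = new File(currentDir.getParent()).toURI();
  }

  public URI getBaseURI() {
    return baseURI;
  }

  public static void main(String[] args) {
    System.out.println(
        new VolumeBaseUriSketch(new File("/data/dn1/current")).getBaseURI());
  }
}

This only pays off because the underlying directory never changes for a live volume; a value that could change would still need to be recomputed or invalidated.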
@@ -182,6 +183,7 @@ public class FsVolumeImpl implements FsVolumeSpi { File parent = currentDir.getParentFile(); cacheExecutor = initializeCacheExecutor(parent); this.metrics = DataNodeVolumeMetrics.create(conf, parent.getPath()); + this.baseURI = new File(currentDir.getParent()).toURI(); } else { cacheExecutor = null; this.metrics = null; @@ -506,7 +508,7 @@ BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException { @Override public URI getBaseURI() { - return new File(currentDir.getParent()).toURI(); + return baseURI; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java index 50ab97b87a7fc..5cdd45c70fcf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java index 02bea81f70d20..96d88345e6b9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java index 55e5decd0832c..ec024cda9ab02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java index 51b768198b7a2..a85c577745af4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.io.filefilter.TrueFileFilter; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java index b4d2c640cd804..7e077181707af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java @@ -65,7 +65,7 @@ import org.codehaus.jackson.map.ObjectReader; import org.codehaus.jackson.map.ObjectWriter; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is used to create provided volumes. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java index b940736ccfd28..31e9ebe0b8c2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.collect.TreeMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.TreeMultimap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java index 07e520117f617..f7b12ff179941 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java index a8a6c85762d62..e431bde9f15eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.datanode.metrics; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.datanode.DataNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index cc802375f9f28..6e633147b5934 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -109,6 +109,12 @@ public class DataNodeMetrics { @Metric("Count of active dataNode xceivers") private MutableGaugeInt dataNodeActiveXceiversCount; + @Metric("Count of active DataNode packetResponder") + private MutableGaugeInt dataNodePacketResponderCount; + + @Metric("Count of active DataNode block recovery worker") + private MutableGaugeInt dataNodeBlockRecoveryWorkerCount; + @Metric MutableRate readBlockOp; @Metric MutableRate writeBlockOp; @Metric MutableRate blockChecksumOp; @@ -525,6 +531,42 @@ public void setDataNodeActiveXceiversCount(int value) { dataNodeActiveXceiversCount.set(value); } + public int getDataNodeActiveXceiverCount() { + return dataNodeActiveXceiversCount.value(); + } + + public void incrDataNodePacketResponderCount() { + dataNodePacketResponderCount.incr(); + } + + public void decrDataNodePacketResponderCount() { + dataNodePacketResponderCount.decr(); + } + + public void setDataNodePacketResponderCount(int value) { + dataNodePacketResponderCount.set(value); + } + + public int getDataNodePacketResponderCount() { + return dataNodePacketResponderCount.value(); + } + + public void incrDataNodeBlockRecoveryWorkerCount() { + dataNodeBlockRecoveryWorkerCount.incr(); + } + + public void decrDataNodeBlockRecoveryWorkerCount() { + dataNodeBlockRecoveryWorkerCount.decr(); + } + + public void setDataNodeBlockRecoveryWorkerCount(int value) { + dataNodeBlockRecoveryWorkerCount.set(value); + } + + public int getDataNodeBlockRecoveryWorkerCount() { + return dataNodeBlockRecoveryWorkerCount.value(); + } + public void incrECDecodingTime(long decodingTimeNanos) { ecDecodingTimeNanos.incr(decodingTimeNanos); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java index 401fc8bd712d3..a30baa1d23bea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode.metrics; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java index 584e12bdc9b02..798def0c716b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/HostRestrictingAuthorizationFilterHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.web; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandler.Sharable; import io.netty.channel.ChannelHandlerContext; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java index 366f47f29631d..293102c9d8560 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java @@ -23,9 +23,9 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import java.io.ByteArrayInputStream; import java.io.DataInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java index b56b0d09ac28b..02ec25c13c874 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java @@ -17,7 +17,7 @@ */ package 
org.apache.hadoop.hdfs.server.datanode.web.webhdfs; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import com.sun.jersey.api.ParamException; import com.sun.jersey.api.container.ContainerException; import io.netty.buffer.Unpooled; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java index ff68a7ee7e4d0..d0c71f6be1606 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.web.webhdfs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java index 007272eda9e70..7ee2251fac106 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.codec.digest.DigestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java index 4f8e373d9c2ce..6845c572ef6b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java @@ -20,9 +20,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java index c7cb089c5b72d..06e8dd008f5ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.codec.digest.DigestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java index f7c84e16f7b81..e36628edf0eb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java index ebcbb4c2fa9f9..e9f9f33e71535 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import com.google.common.base.Preconditions; -import com.google.common.base.Throwables; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java index a8adcbd5621bb..520e80f3974c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java index 4f75aff1a518c..ad5a3c2090edf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet; import org.apache.hadoop.hdfs.tools.DiskBalancerCLI; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Executes the report command. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java index 2d8ba8a0a0724..b7bb3f02dce9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.connectors; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java index 838511ba82bf8..268c055a354ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java @@ -19,7 +19,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java index 1307983c80927..0e405ff7bd3c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java index 6cf244be9442d..fce858aaca01b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.util.HashMap; import java.util.Map; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java index 5a4eb6d68554d..bcce012ff84b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java index 3f9734537a52b..0ed56afb39a5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.planner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.server.diskbalancer.datamodel diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java index 44039eaa0f36e..72df5abe6bcaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.IOException; import java.util.LinkedList; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 06c7cc5a5acf7..cae6b68793580 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hdfs.server.mover; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.cli.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -134,9 +134,17 @@ private List getTargetStorages(StorageType t) { final int maxNoMoveInterval = conf.getInt( DFSConfigKeys.DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY, DFSConfigKeys.DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT); - this.retryMaxAttempts = conf.getInt( + final int maxAttempts = conf.getInt( DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT); + if (maxAttempts >= 0) { + this.retryMaxAttempts = maxAttempts; + } else { + LOG.warn(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY + " is " + + "configured with a negative value, using default value of " + + DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT); + this.retryMaxAttempts = DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT; + } this.retryCount = retryCount; this.dispatcher = new Dispatcher(nnc, Collections. emptySet(), Collections. emptySet(), movedWinWidth, moverThreads, 0, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java index e9e3e598fc12a..a610aee48496c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.util.LongBitFormat; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Class to pack an AclEntry into an integer.
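Editor's note: the Mover hunk above rejects a negative dfs.mover.retry-max-attempts value and falls back to the default instead of carrying the bad value forward. A minimal standalone sketch of the same guard is shown below; the class and key names are invented for illustration, only Configuration.getInt() and the SLF4J calls are real APIs.

    import org.apache.hadoop.conf.Configuration;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class RetryConf {
      private static final Logger LOG = LoggerFactory.getLogger(RetryConf.class);

      /** Read an int setting, falling back to the default (with a warning)
       *  when the configured value is negative, mirroring the Mover change. */
      static int getNonNegativeInt(Configuration conf, String key, int defaultValue) {
        final int value = conf.getInt(key, defaultValue);
        if (value >= 0) {
          return value;
        }
        LOG.warn("{} is configured with a negative value, using default value of {}",
            key, defaultValue);
        return defaultValue;
      }

      private RetryConf() {
      }
    }
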
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java index 6d546f81c4f99..2dfe50742dac2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java @@ -24,8 +24,8 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.hdfs.util.ReferenceCountMap.ReferenceCounter; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Feature that represents the ACLs of the inode. @@ -83,17 +83,17 @@ public int hashCode() { } @Override - public int getRefCount() { + public synchronized int getRefCount() { return refCount; } @Override - public int incrementAndGetRefCount() { + public synchronized int incrementAndGetRefCount() { return ++refCount; } @Override - public int decrementAndGetRefCount() { + public synchronized int decrementAndGetRefCount() { return (refCount > 0) ? --refCount : 0; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java index fa268c72a1eb2..806cdc6d6940b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java @@ -20,9 +20,9 @@ import java.util.Collections; import java.util.List; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java index 4402e263a2108..031929ce67c79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java @@ -28,10 +28,10 @@ import java.util.Iterator; import java.util.List; -import com.google.common.collect.ComparisonChain; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Ordering; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index 9f5f29e371432..bbe607670f71a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageState; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Extension of FSImage for the backup node. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 2fb1933c421cc..dab227fcc763c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -51,7 +51,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 33bb74caabe4f..68ab12c9eb4e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -43,6 +43,7 @@ import java.util.TreeMap; import java.util.concurrent.locks.ReentrantLock; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -89,10 +90,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; /** * The Cache Manager handles caching on DataNodes. 
@@ -935,6 +936,11 @@ public void setCachedLocations(LocatedBlocks locations) { } } + @SuppressFBWarnings( + value="EC_UNRELATED_TYPES", + justification="HDFS-15255 Asked Wei-Chiu and Pifta to review this" + + " warning and we all agree the code is OK and the warning is not " + + "needed") private void setCachedLocations(LocatedBlock block) { CachedBlock cachedBlock = new CachedBlock(block.getBlock().getBlockId(), @@ -1071,6 +1077,10 @@ public PersistState saveState() throws IOException { if (p.getLimit() != null) b.setLimit(p.getLimit()); + if (p.getMaxRelativeExpiryMs() != null) { + b.setMaxRelativeExpiry(p.getMaxRelativeExpiryMs()); + } + pools.add(b.build()); } @@ -1136,6 +1146,10 @@ public void loadState(PersistState s) throws IOException { if (p.hasLimit()) info.setLimit(p.getLimit()); + if (p.hasMaxRelativeExpiry()) { + info.setMaxRelativeExpiryMs(p.getMaxRelativeExpiry()); + } + addCachePool(info); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index 20b1d25434a1b..dda4789b74e49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -32,7 +32,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.IntrusiveCollection; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A CachePool describes a set of cache resources being managed by the NameNode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java index 186bc3d727639..4df170d771601 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import java.util.concurrent.TimeUnit; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java index 18cc7afbe9d5c..5c753070a459e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; -import com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; /** * A unique signature intended to identify checkpoint transactions. 
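Editor's note: the AclFeature hunk earlier in this section makes the reference-count accessors synchronized so that concurrent increments and decrements cannot be lost. The same contract can be sketched with an AtomicInteger, as below; this is only an illustration of the invariant (the count never drops below zero), not the actual AclFeature code.

    import java.util.concurrent.atomic.AtomicInteger;

    /** Illustrative thread-safe reference counter, not the real AclFeature. */
    final class RefCounter {
      private final AtomicInteger refCount = new AtomicInteger();

      int getRefCount() {
        return refCount.get();
      }

      int incrementAndGetRefCount() {
        return refCount.incrementAndGet();
      }

      /** Decrement, but never below zero, matching the guarded decrement in the patch. */
      int decrementAndGetRefCount() {
        return refCount.updateAndGet(c -> c > 0 ? c - 1 : 0);
      }
    }

Plain synchronized methods, as the patch uses, are equally correct and keep the field a simple int; the atomic variant mainly avoids monitor contention when many threads touch the same feature.
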
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index ab07efa81c77a..0557580404a7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -28,7 +28,7 @@ import java.net.URL; import java.util.List; -import com.google.common.math.LongMath; +import org.apache.hadoop.thirdparty.com.google.common.math.LongMath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -43,7 +43,7 @@ import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.Daemon; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * The Checkpointer is responsible for supporting periodic checkpoints diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java index 0263f2a347faa..7a5963a6c57cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java index 81d285a03626b..d17fd06bc882c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java @@ -21,7 +21,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index 26c5e9049f74d..8c3c0fd933a43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -45,9 +45,9 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import 
com.google.common.base.Throwables; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; /** * An implementation of the abstract class {@link EditLogInputStream}, which diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index 4dbe2720ddaa0..7fe84307df125 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.io.IOUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * An implementation of the abstract class {@link EditLogOutputStream}, which @@ -88,7 +88,12 @@ public EditLogFileOutputStream(Configuration conf, File name, int size) } else { rp = new RandomAccessFile(name, "rw"); } - fp = new FileOutputStream(rp.getFD()); // open for append + try { + fp = new FileOutputStream(rp.getFD()); // open for append + } catch (IOException e) { + IOUtils.closeStream(rp); + throw e; + } fc = rp.getChannel(); fc.position(fc.size()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index be68f6d609008..affba028c08cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -32,7 +32,7 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A double-buffer for edits. New edits are written into the first buffer diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java index 938eacd7fd648..33954a2cc3993 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Used to inject certain faults for testing. 
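Editor's note: the EditLogFileOutputStream hunk above closes the freshly opened RandomAccessFile when wrapping its descriptor in a FileOutputStream fails, so the descriptor is not leaked. The same pattern, reduced to a standalone sketch (names are invented; only the JDK classes are real):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    final class AppendOpen {
      /** Open the file for append via its RandomAccessFile descriptor,
       *  releasing that descriptor if the second open fails. */
      static FileOutputStream openForAppend(File name) throws IOException {
        final RandomAccessFile rp = new RandomAccessFile(name, "rw");
        try {
          return new FileOutputStream(rp.getFD());   // shares rp's descriptor
        } catch (IOException e) {
          rp.close();   // nothing else will ever close it, so do it here
          throw e;
        }
      }

      private AppendOpen() {
      }
    }

In the real class both objects are kept as fields and closed together; the sketch only shows the error path that the patch adds.
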
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 48e405ba35432..2f5fde8e94015 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -29,10 +29,10 @@ import java.util.NavigableMap; import java.util.TreeMap; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index d9f7e9afdc70d..d5fdba84633ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -343,6 +343,7 @@ public synchronized ErasureCodingPolicy addPolicy( policiesByName.values().toArray(new ErasureCodingPolicyInfo[0]); allPersistedPolicies.put(policy.getId(), new ErasureCodingPolicyInfo(policy)); + LOG.info("Added erasure coding policy " + policy); return policy; } @@ -414,7 +415,7 @@ public synchronized boolean disablePolicy(String name) { enabledPolicies = enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]); info.setState(ErasureCodingPolicyState.DISABLED); - LOG.info("Disable the erasure coding policy " + name); + LOG.info("Disabled the erasure coding policy " + name); allPersistedPolicies.put(info.getPolicy().getId(), createPolicyInfo(info.getPolicy(), ErasureCodingPolicyState.DISABLED)); @@ -448,7 +449,7 @@ public synchronized boolean enablePolicy(String name) { enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]); allPersistedPolicies.put(ecPolicy.getId(), createPolicyInfo(info.getPolicy(), ErasureCodingPolicyState.ENABLED)); - LOG.info("Enable the erasure coding policy " + name); + LOG.info("Enabled the erasure coding policy " + name); return true; } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java index 31dc51a3c24b1..6ced588e96882 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.permission.AclEntry; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java index 919e853f4590f..2586a257beff3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature; import org.apache.hadoop.ipc.RetriableException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Helper class to perform append operation. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java index 8e9606dcf61fe..164368d28d6ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.util.EnumCounters; import org.apache.hadoop.security.AccessControlException; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.FileNotFoundException; import java.io.IOException; @@ -56,15 +56,18 @@ static FileStatus setPermission( throw new InvalidPathException(src); } INodesInPath iip; + boolean changed; fsd.writeLock(); try { iip = fsd.resolvePath(pc, src, DirOp.WRITE); fsd.checkOwner(pc, iip); - unprotectedSetPermission(fsd, iip, permission); + changed = unprotectedSetPermission(fsd, iip, permission); } finally { fsd.writeUnlock(); } - fsd.getEditLog().logSetPermissions(iip.getPath(), permission); + if (changed) { + fsd.getEditLog().logSetPermissions(iip.getPath(), permission); + } return fsd.getAuditFileInfo(iip); } @@ -75,6 +78,7 @@ static FileStatus setOwner( throw new InvalidPathException(src); } INodesInPath iip; + boolean changed; fsd.writeLock(); try { iip = fsd.resolvePath(pc, src, DirOp.WRITE); @@ -89,11 +93,13 @@ static FileStatus setOwner( "User " + pc.getUser() + " does not belong to " + group); } } - unprotectedSetOwner(fsd, iip, username, group); + changed = unprotectedSetOwner(fsd, iip, username, group); } finally { fsd.writeUnlock(); } - fsd.getEditLog().logSetOwner(iip.getPath(), username, group); + if (changed) { + fsd.getEditLog().logSetOwner(iip.getPath(), username, group); + } return fsd.getAuditFileInfo(iip); } @@ -257,28 +263,32 @@ static void setQuota(FSDirectory 
fsd, FSPermissionChecker pc, String src, } } - static void unprotectedSetPermission( + static boolean unprotectedSetPermission( FSDirectory fsd, INodesInPath iip, FsPermission permissions) throws FileNotFoundException, UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException { assert fsd.hasWriteLock(); final INode inode = FSDirectory.resolveLastINode(iip); int snapshotId = iip.getLatestSnapshotId(); + long oldPerm = inode.getPermissionLong(); inode.setPermission(permissions, snapshotId); + return oldPerm != inode.getPermissionLong(); } - static void unprotectedSetOwner( + static boolean unprotectedSetOwner( FSDirectory fsd, INodesInPath iip, String username, String groupname) throws FileNotFoundException, UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException { assert fsd.hasWriteLock(); final INode inode = FSDirectory.resolveLastINode(iip); + long oldPerm = inode.getPermissionLong(); if (username != null) { inode.setUser(username, iip.getLatestSnapshotId()); } if (groupname != null) { inode.setGroup(groupname, iip.getLatestSnapshotId()); } + return oldPerm != inode.getPermissionLong(); } static boolean setTimes( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java index b92c4140b4a05..04ae358c67afb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.permission.FsAction; @@ -52,9 +52,9 @@ static FileStatus concat(FSDirectory fsd, FSPermissionChecker pc, String target, String[] srcs, boolean logRetryCache) throws IOException { validatePath(target, srcs); assert srcs != null; - if (FSDirectory.LOG.isDebugEnabled()) { - FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target); - } + NameNode.stateChangeLog.debug("DIR* NameSystem.concat: {} to {}", + Arrays.toString(srcs), target); + final INodesInPath targetIIP = fsd.resolvePath(pc, target, DirOp.WRITE); // write permission for the target if (fsd.isPermissionEnabled()) { @@ -66,11 +66,6 @@ static FileStatus concat(FSDirectory fsd, FSPermissionChecker pc, // check the srcs INodeFile[] srcFiles = verifySrcFiles(fsd, srcs, targetIIP, pc); - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " + - Arrays.toString(srcs) + " to " + target); - } - long timestamp = now(); fsd.writeLock(); try { @@ -150,7 +145,7 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs, + " is referred by some other reference in some snapshot."); } // source file cannot be the same with the target file - if (srcINode == targetINode) { + if (srcINode.equals(targetINode)) { throw new HadoopIllegalArgumentException("concat: the src file " + src + " is the same with the target file " + targetIIP.getPath()); } @@ -234,10 +229,8 @@ private static void verifyQuota(FSDirectory fsd, INodesInPath targetIIP, static void unprotectedConcat(FSDirectory fsd, INodesInPath targetIIP, INodeFile[] srcList, long timestamp) throws IOException { 
assert fsd.hasWriteLock(); - if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to " - + targetIIP.getPath()); - } + NameNode.stateChangeLog.debug("DIR* NameSystem.concat to {}", + targetIIP.getPath()); final INodeFile trgInode = targetIIP.getLastINode().asFile(); QuotaCounts deltas = computeQuotaDeltas(fsd, trgInode, srcList); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java index 6c7b1fae50dec..516d59415401c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java @@ -52,8 +52,8 @@ import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo; import org.apache.hadoop.security.SecurityUtil; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 011c72ea49c6f..11981b27183d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java index 95e889888bb7f..da324fb46738a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.fs.permission.FsCreateModes; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.ParentNotDirectoryException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java index 602f9962942be..2fd25237f16e6 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.InvalidPathException; @@ -262,6 +262,11 @@ static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc, throws IOException { final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK); final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK); + + if(fsd.isNonEmptyDirectory(srcIIP)) { + DFSUtil.checkProtectedDescendants(fsd, srcIIP); + } + if (fsd.isPermissionEnabled()) { boolean renameToTrash = false; if (null != options && diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java index 3f873d7eea5ff..4057bbd211c0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Helper class to perform storage policy satisfier related operations. 
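Editor's note: the FSDirAttrOp hunks a little above make unprotectedSetPermission/unprotectedSetOwner report whether anything actually changed, and the callers only write an edit-log record when it did, so repeated chmod/chown calls with identical values no longer inflate the edit log. A toy version of that shape (hypothetical types, not the HDFS classes):

    /** Toy illustration of "only log an edit when the attribute changed". */
    final class NoOpAwareSetter {
      interface EditLog {
        void logSetPermissions(String path, short permission);
      }

      static final class Inode {
        private short permission;
        short getPermission() { return permission; }
        void setPermission(short p) { permission = p; }
      }

      /** Apply the new permission and report whether it differs from the old one. */
      static boolean unprotectedSetPermission(Inode inode, short newPermission) {
        final short oldPermission = inode.getPermission();
        inode.setPermission(newPermission);
        return oldPermission != inode.getPermission();
      }

      static void setPermission(Inode inode, String path, short newPermission,
                                EditLog editLog) {
        if (unprotectedSetPermission(inode, newPermission)) {
          editLog.logSetPermissions(path, newPermission);  // skipped for no-ops
        }
      }

      private NoOpAwareSetter() {
      }
    }
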
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 7f07dd1feda1b..dfacc491eae53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java index bf55d30591074..22b0e175018d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; @@ -38,7 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Helper class to perform truncate operation. 
@@ -111,6 +112,10 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg, + truncatedBlock.getNumBytes(); if (newLength == truncateLength) { return new TruncateResult(false, fsd.getAuditFileInfo(iip)); + } else { + throw new AlreadyBeingCreatedException( + RecoverLeaseOp.TRUNCATE_FILE.getExceptionMessage(src, + clientName, clientMachine, src + " is being truncated.")); } } } @@ -262,7 +267,11 @@ static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip, uc.setTruncateBlock(new BlockInfoContiguous(oldBlock, oldBlock.getReplication())); uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); - uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp()); + final long newGenerationStamp = newBlock.getGenerationStamp(); + uc.getTruncateBlock().setGenerationStamp(newGenerationStamp); + // Update global generation stamp in Standby NameNode + blockManager.getBlockIdManager().setGenerationStampIfGreater( + newGenerationStamp); truncatedBlockUC = oldBlock; NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 99a80b537c208..0d9c6aeeb9c45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.hdfs.AddBlockFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java index ff82610f545bb..ce78b5b718e1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index c06b59f625a79..bd2ff5242e1c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -20,9 +20,9 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.util.StringUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -69,6 +69,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; @@ -88,6 +89,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY; @@ -168,6 +171,7 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) { // // Each entry in this set must be a normalized path. private volatile SortedSet protectedDirectories; + private final boolean isProtectedSubDirectoriesEnable; private final boolean isPermissionEnabled; private final boolean isPermissionContentSummarySubAccess; @@ -203,8 +207,47 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) { // will be bypassed private HashSet usersToBypassExtAttrProvider = null; - public void setINodeAttributeProvider(INodeAttributeProvider provider) { + // If external inode attribute provider is configured, use the new + // authorizeWithContext() API or not. + private boolean useAuthorizationWithContextAPI = false; + + public void setINodeAttributeProvider( + @Nullable INodeAttributeProvider provider) { attributeProvider = provider; + + if (attributeProvider == null) { + // attributeProvider is set to null during NN shutdown. + return; + } + + // if the runtime external authorization provider doesn't support + // checkPermissionWithContext(), fall back to the old API + // checkPermission(). + // This check is done only once during NameNode initialization to reduce + // runtime overhead. + Class[] cArg = new Class[1]; + cArg[0] = INodeAttributeProvider.AuthorizationContext.class; + + INodeAttributeProvider.AccessControlEnforcer enforcer = + attributeProvider.getExternalAccessControlEnforcer(null); + + // If external enforcer is null, we use the default enforcer, which + // supports the new API. 
+ if (enforcer == null) { + useAuthorizationWithContextAPI = true; + return; + } + + try { + Class clazz = enforcer.getClass(); + clazz.getDeclaredMethod("checkPermissionWithContext", cArg); + useAuthorizationWithContextAPI = true; + LOG.info("Use the new authorization provider API"); + } catch (NoSuchMethodException e) { + useAuthorizationWithContextAPI = false; + LOG.info("Fallback to the old authorization provider API because " + + "the expected method is not found."); + } } /** @@ -341,6 +384,9 @@ public enum DirOp { DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); this.protectedDirectories = parseProtectedDirectories(conf); + this.isProtectedSubDirectoriesEnable = conf.getBoolean( + DFS_PROTECTED_SUBDIRECTORIES_ENABLE, + DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT); Preconditions.checkArgument(this.inodeXAttrsLimit >= 0, "Cannot set a negative limit on the number of xattrs per inode (%s).", @@ -501,6 +547,10 @@ public SortedSet getProtectedDirectories() { return protectedDirectories; } + public boolean isProtectedSubDirectoriesEnable() { + return isProtectedSubDirectoriesEnable; + } + /** * Set directories that cannot be removed unless empty, even by an * administrator. @@ -684,6 +734,26 @@ public INodesInPath resolvePath(FSPermissionChecker pc, String src, return iip; } + /** + * This method should only be used from internal paths and not those provided + * directly by a user. It resolves a given path into an INodesInPath in a + * similar way to resolvePath(...), only traversal and permissions are not + * checked. + * @param src The path to resolve. + * @return if the path indicates an inode, return path after replacing up to + * {@code } with the corresponding path of the inode, else + * the path in {@code src} as is. If the path refers to a path in + * the "raw" directory, return the non-raw pathname. + * @throws FileNotFoundException + */ + public INodesInPath unprotectedResolvePath(String src) + throws FileNotFoundException { + byte[][] components = INode.getPathComponents(src); + boolean isRaw = isReservedRawName(components); + components = resolveComponents(components, this); + return INodesInPath.resolve(rootDir, components, isRaw); + } + INodesInPath resolvePath(FSPermissionChecker pc, String src, long fileId) throws UnresolvedLinkException, FileNotFoundException, AccessControlException, ParentNotDirectoryException { @@ -1139,7 +1209,8 @@ static void verifyQuota(INodesInPath iip, int pos, QuotaCounts deltas, // check existing components in the path for(int i = (pos > iip.length() ? iip.length(): pos) - 1; i >= 0; i--) { - if (commonAncestor == iip.getINode(i)) { + if (commonAncestor == iip.getINode(i) + && !commonAncestor.isInLatestSnapshot(iip.getLatestSnapshotId())) { // Stop checking for quota when common ancestor is reached return; } @@ -1784,7 +1855,8 @@ FSPermissionChecker getPermissionChecker() FSPermissionChecker getPermissionChecker(String fsOwner, String superGroup, UserGroupInformation ugi) throws AccessControlException { return new FSPermissionChecker( - fsOwner, superGroup, ugi, getUserFilteredAttributeProvider(ugi)); + fsOwner, superGroup, ugi, getUserFilteredAttributeProvider(ugi), + useAuthorizationWithContextAPI); } void checkOwner(FSPermissionChecker pc, INodesInPath iip) @@ -1980,7 +2052,23 @@ INodeAttributes getAttributes(INodesInPath iip) // first empty component for the root. however file status // related calls are expected to strip out the root component according // to TestINodeAttributeProvider. 
- byte[][] components = iip.getPathComponents(); + // Due to HDFS-15372 the attribute provider should receive the resolved + // snapshot path. I.e., rather than seeing /d/.snapshot/sn/data it should + // see /d/data. However, for the path /d/.snapshot/sn it should see this + // full path. If the current inode is the snapshot name, it always has the + // same ID as its parent inode, so we can use that to check if it is the + // path which needs to be handled specially. + byte[][] components; + INodeDirectory parent = node.getParent(); + if (iip.isSnapshot() + && parent != null && parent.getId() != node.getId()) { + // For snapshot paths, we always use node.getPathComponents so the + // snapshot path is resolved to the real path, unless the last component + // is the snapshot name root directory. + components = node.getPathComponents(); + } else { + components = iip.getPathComponents(); + } components = Arrays.copyOfRange(components, 1, components.length); nodeAttrs = ap.getAttributes(components, nodeAttrs); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index be8c684f01508..2ef3a028acd2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -109,9 +109,9 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.token.delegation.DelegationKey; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 2b47398f40c98..e73dfa7797df8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -33,8 +33,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.util.ExitUtil; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; class FSEditLogAsync extends FSEditLog implements Runnable { static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 294296d2d36d5..2ac0eb15b949f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -116,9 +116,9 @@ import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.Timer; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.apache.hadoop.log.LogThrottlingHelper.LogAction; @@ -798,7 +798,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, final String snapshotRoot = renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot, logVersion); - INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE); + INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot); String path = fsNamesys.getSnapshotManager().createSnapshot( fsDir.getFSNamesystem().getLeaseManager(), iip, snapshotRoot, createSnapshotOp.snapshotName, @@ -816,7 +816,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, final String snapshotRoot = renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot, logVersion); - INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE); + INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot); fsNamesys.getSnapshotManager().deleteSnapshot(iip, deleteSnapshotOp.snapshotName, new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(), @@ -838,7 +838,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, final String snapshotRoot = renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot, logVersion); - INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE); + INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot); fsNamesys.getSnapshotManager().renameSnapshot(iip, snapshotRoot, renameSnapshotOp.snapshotOldName, renameSnapshotOp.snapshotNewName, renameSnapshotOp.mtime); @@ -1150,8 +1150,12 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, oldBlock.setNumBytes(newBlock.getNumBytes()); boolean changeMade = oldBlock.getGenerationStamp() != newBlock.getGenerationStamp(); - oldBlock.setGenerationStamp(newBlock.getGenerationStamp()); - + final long newGenerationStamp = newBlock.getGenerationStamp(); + oldBlock.setGenerationStamp(newGenerationStamp); + // Update global generation stamp in Standby NameNode + fsNamesys.getBlockManager().getBlockIdManager(). 
+ setGenerationStampIfGreater(newGenerationStamp); + if (!oldBlock.isComplete() && (!isLastBlock || op.shouldCompleteLastBlock())) { changeMade = true; @@ -1234,7 +1238,7 @@ private void incrOpCount(FSEditLogOpCodes opCode, holder = new Holder(1); opCounts.put(opCode, holder); } else { - holder.held++; + holder.held = holder.held + 1; } counter.increment(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 963628f9ac4e3..feff8b48f7e21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -137,11 +137,11 @@ import org.xml.sax.SAXException; import org.xml.sax.helpers.AttributesImpl; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Helper classes for reading the ops from an InputStream. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 9b95f14bddc28..86b4150777edc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -75,9 +75,9 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Time; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * FSImage handles checkpointing and logging of the namespace edits. 
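Editor's note: both the FSDirTruncateOp and FSEditLogLoader hunks above push a replayed block's generation stamp into the BlockIdManager with setGenerationStampIfGreater, so a Standby NameNode's global stamp can only move forward while tailing edits. The "advance only if larger" idea on its own can be sketched with an AtomicLong as below; this is an illustration, not the BlockIdManager implementation.

    import java.util.concurrent.atomic.AtomicLong;

    /** Illustrative monotonically increasing stamp holder. */
    final class MonotonicStamp {
      private final AtomicLong generationStamp = new AtomicLong();

      /** Raise the stamp to the candidate value only if the candidate is larger. */
      void setIfGreater(long candidate) {
        generationStamp.accumulateAndGet(candidate, Math::max);
      }

      long get() {
        return generationStamp.get();
      }
    }
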
@@ -172,6 +172,7 @@ protected FSImage(Configuration conf, this.editLog = FSEditLog.newInstance(conf, storage, editsDirs); archivalManager = new NNStorageRetentionManager(conf, storage, editLog); + FSImageFormatProtobuf.initParallelLoad(conf); } void format(FSNamesystem fsn, String clusterId, boolean force) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index da067d15b6db5..478cec55d0dd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -75,8 +75,8 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.util.StringUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Contains inner classes for reading or writing the on-disk format for @@ -242,6 +242,7 @@ public void load(File file, boolean requireSameLayoutVersion) * the layout version. */ public static LoaderDelegator newLoader(Configuration conf, FSNamesystem fsn) { + return new LoaderDelegator(conf, fsn); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 6212e65e01db6..0a69c99cab810 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -28,8 +28,9 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,8 +72,8 @@ import org.apache.hadoop.hdfs.util.EnumCounters; import org.apache.hadoop.hdfs.util.ReadOnlyList; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.protobuf.ByteString; @InterfaceAudience.Private @@ -204,15 +205,20 @@ public static void updateBlocksMap(INodeFile file, BlockManager bm) { private final FSDirectory dir; private final FSNamesystem fsn; private final FSImageFormatProtobuf.Loader parent; - private ReentrantLock cacheNameMapLock; - private ReentrantLock blockMapLock; + + // Update blocks map by single thread asynchronously + private ExecutorService blocksMapUpdateExecutor; + // update name cache by single thread asynchronously. 
+ private ExecutorService nameCacheUpdateExecutor; Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) { this.fsn = fsn; this.dir = fsn.dir; this.parent = parent; - cacheNameMapLock = new ReentrantLock(true); - blockMapLock = new ReentrantLock(true); + // Note: these executors must be SingleThreadExecutor, as they + // are used to modify structures which are not thread safe. + blocksMapUpdateExecutor = Executors.newSingleThreadExecutor(); + nameCacheUpdateExecutor = Executors.newSingleThreadExecutor(); } void loadINodeDirectorySectionInParallel(ExecutorService service, @@ -263,7 +269,6 @@ void loadINodeDirectorySectionInParallel(ExecutorService service, void loadINodeDirectorySection(InputStream in) throws IOException { final List refList = parent.getLoaderContext() .getRefList(); - ArrayList inodeList = new ArrayList<>(); while (true) { INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry .parseDelimitedFrom(in); @@ -274,15 +279,7 @@ void loadINodeDirectorySection(InputStream in) throws IOException { INodeDirectory p = dir.getInode(e.getParent()).asDirectory(); for (long id : e.getChildrenList()) { INode child = dir.getInode(id); - if (addToParent(p, child)) { - if (child.isFile()) { - inodeList.add(child); - } - if (inodeList.size() >= DIRECTORY_ENTRY_BATCH_SIZE) { - addToCacheAndBlockMap(inodeList); - inodeList.clear(); - } - } else { + if (!addToParent(p, child)) { LOG.warn("Failed to add the inode {} to the directory {}", child.getId(), p.getId()); } @@ -290,40 +287,79 @@ void loadINodeDirectorySection(InputStream in) throws IOException { for (int refId : e.getRefChildrenList()) { INodeReference ref = refList.get(refId); - if (addToParent(p, ref)) { - if (ref.isFile()) { - inodeList.add(ref); - } - if (inodeList.size() >= DIRECTORY_ENTRY_BATCH_SIZE) { - addToCacheAndBlockMap(inodeList); - inodeList.clear(); - } - } else { + if (!addToParent(p, ref)) { LOG.warn("Failed to add the inode reference {} to the directory {}", ref.getId(), p.getId()); } } } - addToCacheAndBlockMap(inodeList); } - private void addToCacheAndBlockMap(ArrayList inodeList) { - try { - cacheNameMapLock.lock(); - for (INode i : inodeList) { - dir.cacheName(i); - } - } finally { - cacheNameMapLock.unlock(); + private void fillUpInodeList(ArrayList inodeList, INode inode) { + if (inode.isFile()) { + inodeList.add(inode); } + if (inodeList.size() >= DIRECTORY_ENTRY_BATCH_SIZE) { + addToCacheAndBlockMap(inodeList); + inodeList.clear(); + } + } - try { - blockMapLock.lock(); - for (INode i : inodeList) { - updateBlocksMap(i.asFile(), fsn.getBlockManager()); + private void addToCacheAndBlockMap(final ArrayList inodeList) { + final ArrayList inodes = new ArrayList<>(inodeList); + nameCacheUpdateExecutor.submit( + new Runnable() { + @Override + public void run() { + addToCacheInternal(inodes); + } + }); + blocksMapUpdateExecutor.submit( + new Runnable() { + @Override + public void run() { + updateBlockMapInternal(inodes); + } + }); + } + + // update name cache with non-thread safe + private void addToCacheInternal(ArrayList inodeList) { + for (INode i : inodeList) { + dir.cacheName(i); + } + } + + // update blocks map with non-thread safe + private void updateBlockMapInternal(ArrayList inodeList) { + for (INode i : inodeList) { + updateBlocksMap(i.asFile(), fsn.getBlockManager()); + } + } + + void waitBlocksMapAndNameCacheUpdateFinished() throws IOException { + long start = System.currentTimeMillis(); + waitExecutorTerminated(blocksMapUpdateExecutor); + 
waitExecutorTerminated(nameCacheUpdateExecutor); + LOG.info("Completed update blocks map and name cache, total waiting " + + "duration {}ms.", (System.currentTimeMillis() - start)); + } + + private void waitExecutorTerminated(ExecutorService executorService) + throws IOException { + executorService.shutdown(); + long start = System.currentTimeMillis(); + while (!executorService.isTerminated()) { + try { + executorService.awaitTermination(1, TimeUnit.SECONDS); + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting to executor service terminated duration {}ms.", + (System.currentTimeMillis() - start)); + } + } catch (InterruptedException e) { + LOG.error("Interrupted waiting for executor terminated.", e); + throw new IOException(e); } - } finally { - blockMapLock.unlock(); } } @@ -340,6 +376,7 @@ private int loadINodesInSection(InputStream in, Counter counter) // As the input stream is a LimitInputStream, the reading will stop when // EOF is encountered at the end of the stream. int cntr = 0; + ArrayList inodeList = new ArrayList<>(); while (true) { INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in); if (p == null) { @@ -354,12 +391,16 @@ private int loadINodesInSection(InputStream in, Counter counter) synchronized(this) { dir.addToInodeMap(n); } + fillUpInodeList(inodeList, n); } cntr++; if (counter != null) { counter.increment(); } } + if (inodeList.size() > 0){ + addToCacheAndBlockMap(inodeList); + } return cntr; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index be21d1f80f9e9..183449f574ec9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -76,8 +76,8 @@ import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; /** @@ -88,6 +88,8 @@ public final class FSImageFormatProtobuf { private static final Logger LOG = LoggerFactory .getLogger(FSImageFormatProtobuf.class); + private static volatile boolean enableParallelLoad = false; + public static final class LoaderContext { private SerialNumberManager.StringTable stringTable; private final ArrayList refList = Lists.newArrayList(); @@ -269,14 +271,20 @@ public InputStream getInputStreamForSection(FileSummary.Section section, String compressionCodec) throws IOException { FileInputStream fin = new FileInputStream(filename); - FileChannel channel = fin.getChannel(); - channel.position(section.getOffset()); - InputStream in = new BufferedInputStream(new LimitInputStream(fin, - section.getLength())); + try { - in = FSImageUtil.wrapInputStreamForCompression(conf, - compressionCodec, in); - return in; + FileChannel channel = fin.getChannel(); + channel.position(section.getOffset()); + InputStream in = new BufferedInputStream(new LimitInputStream(fin, + section.getLength())); + + in = FSImageUtil.wrapInputStreamForCompression(conf, + compressionCodec, in); + return in; + } catch (IOException e) { + fin.close(); + throw e; + } } /** @@ -447,6 +455,7 @@ 
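The getInputStreamForSection() hunk above uses the close-on-failure idiom: if positioning the channel or wrapping the stream for decompression throws, the FileInputStream is closed before the exception propagates; on success, ownership of the open stream passes to the caller. A stripped-down sketch with plain JDK classes (file name and offset are placeholders):

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

final class SectionStreams {
  /** Opens a file positioned at the given offset; closes it if setup fails. */
  static InputStream openAt(String filename, long offset) throws IOException {
    FileInputStream fin = new FileInputStream(filename);
    try {
      fin.getChannel().position(offset);      // may throw
      return new BufferedInputStream(fin);    // caller now owns the stream
    } catch (IOException e) {
      fin.close();                            // avoid leaking the descriptor
      throw e;
    }
  }
}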
public int compare(FileSummary.Section s1, FileSummary.Section s2) { } else { inodeLoader.loadINodeDirectorySection(in); } + inodeLoader.waitBlocksMapAndNameCacheUpdateFinished(); break; case FILES_UNDERCONSTRUCTION: inodeLoader.loadFilesUnderConstructionSection(in); @@ -535,10 +544,9 @@ private void loadSecretManagerSection(InputStream in, StartupProgress prog, Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep); for (int i = 0; i < numTokens; ++i) { tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in)); - counter.increment(); } - fsn.loadSecretManagerState(s, keys, tokens); + fsn.loadSecretManagerState(s, keys, tokens, counter); } private void loadCacheManagerSection(InputStream in, StartupProgress prog, @@ -575,9 +583,7 @@ private void loadErasureCodingSection(InputStream in) } private static boolean enableParallelSaveAndLoad(Configuration conf) { - boolean loadInParallel = - conf.getBoolean(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, - DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT); + boolean loadInParallel = enableParallelLoad; boolean compressionEnabled = conf.getBoolean( DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT); @@ -593,6 +599,20 @@ private static boolean enableParallelSaveAndLoad(Configuration conf) { return loadInParallel; } + public static void initParallelLoad(Configuration conf) { + enableParallelLoad = + conf.getBoolean(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY, + DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT); + } + + public static void refreshParallelSaveAndLoad(boolean enable) { + enableParallelLoad = enable; + } + + public static boolean getEnableParallelLoad() { + return enableParallelLoad; + } + public static final class Saver { public static final int CHECK_CANCEL_INTERVAL = 4096; private boolean writeSubSections = false; @@ -633,10 +653,6 @@ public int getInodesPerSubSection() { return inodesPerSubSection; } - public boolean shouldWriteSubSections() { - return writeSubSections; - } - /** * Commit the length and offset of a fsimage section to the summary index, * including the sub section, which will be committed before the section is diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 706b2bda06299..3d75cebf729d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -53,7 +53,7 @@ import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Static utility functions for serializing various pieces of data in the correct diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java index b04007513c3fd..9f71f69902c86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java @@ -35,8 
+35,8 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; class FSImageTransactionalStorageInspector extends FSImageStorageInspector { public static final Logger LOG = LoggerFactory.getLogger( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 99ad6f2eb079f..194f1aff6470d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -107,7 +107,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.hdfs.protocol.BatchedDirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsPartialListing; @@ -336,12 +336,12 @@ import org.apache.log4j.AsyncAppender; import org.eclipse.jetty.util.ajax.JSON; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.LoggerFactory; /** @@ -815,7 +815,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) { LOG.info("Enabling async auditlog"); - enableAsyncAuditLog(); + enableAsyncAuditLog(conf); } fsLock = new FSNamesystemLock(conf, detailedLockHoldTimeMetrics); cond = fsLock.newWriteLockCondition(); @@ -1904,6 +1904,7 @@ private void metaSave(PrintWriter out) { */ BatchedListEntries listOpenFiles(long prevId, EnumSet openFilesTypes, String path) throws IOException { + INode.checkAbsolutePath(path); final String operationName = "listOpenFiles"; checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); @@ -2601,7 +2602,9 @@ private HdfsFileStatus startFileInt(String src, .append(", createFlag=").append(flag) .append(", blockSize=").append(blockSize) .append(", supportedVersions=") - .append(Arrays.toString(supportedVersions)); + .append(Arrays.toString(supportedVersions)) + .append(", ecPolicyName=").append(ecPolicyName) + .append(", storagePolicy=").append(storagePolicy); NameNode.stateChangeLog.debug(builder.toString()); } if (!DFSUtil.isValidName(src) || @@ -2758,7 +2761,7 @@ enum RecoverLeaseOp { TRUNCATE_FILE, RECOVER_LEASE; - private String 
getExceptionMessage(String src, String holder, + public String getExceptionMessage(String src, String holder, String clientMachine, String reason) { return "Failed to " + this + " " + src + " for " + holder + " on " + clientMachine + " because " + reason; @@ -3253,10 +3256,10 @@ boolean delete(String src, boolean recursive, boolean logRetryCache) throw e; } getEditLog().logSync(); + logAuditEvent(true, operationName, src); if (toRemovedBlocks != null) { removeBlocks(toRemovedBlocks); // Incremental deletion of blocks } - logAuditEvent(true, operationName, src); return ret; } @@ -3649,10 +3652,13 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, // if there are no valid replicas on data-nodes. String message = "DIR* NameSystem.internalReleaseLease: " + "Failed to release lease for file " + src + - ". Committed blocks are waiting to be minimally replicated." + - " Try again later."; + ". Committed blocks are waiting to be minimally replicated."; NameNode.stateChangeLog.warn(message); - throw new AlreadyBeingCreatedException(message); + if (!penultimateBlockMinStorage) { + throw new AlreadyBeingCreatedException(message); + } + // Intentionally fall through to UNDER_RECOVERY so BLOCK_RECOVERY is + // attempted case UNDER_CONSTRUCTION: case UNDER_RECOVERY: BlockUnderConstructionFeature uc = @@ -4015,14 +4021,10 @@ void closeFileCommitBlocks(String src, INodeFile pendingFile, */ void renewLease(String holder) throws IOException { checkOperation(OperationCategory.WRITE); - readLock(); - try { - checkOperation(OperationCategory.WRITE); - checkNameNodeSafeMode("Cannot renew lease for " + holder); - leaseManager.renewLease(holder); - } finally { - readUnlock("renewLease"); - } + checkNameNodeSafeMode("Cannot renew lease for " + holder); + // fsn is not mutated so lock is not required. the leaseManger is also + // thread-safe. + leaseManager.renewLease(holder); } /** @@ -4558,7 +4560,8 @@ public long getMissingReplOneBlocksCount() { return blockManager.getMissingReplOneBlocksCount(); } - @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"}) + @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"}, + type = Metric.Type.COUNTER) public int getExpiredHeartbeats() { return datanodeStatistics.getExpiredHeartbeats(); } @@ -4755,6 +4758,20 @@ public int getFsLockQueueLength() { return fsLock.getQueueLength(); } + @Metric(value = {"ReadLockLongHoldCount", "The number of time " + + "the read lock has been held for longer than the threshold"}, + type = Metric.Type.COUNTER) + public long getNumOfReadLockLongHold() { + return fsLock.getNumOfReadLockLongHold(); + } + + @Metric(value = {"WriteLockLongHoldCount", "The number of time " + + "the write lock has been held for longer than the threshold"}, + type = Metric.Type.COUNTER) + public long getNumOfWriteLockLongHold() { + return fsLock.getNumOfWriteLockLongHold(); + } + int getNumberOfDatanodes(DatanodeReportType type) { readLock(); try { @@ -5504,6 +5521,19 @@ public int getNumDecomDeadDataNodes() { return deadDecommissioned; } + @Override // FSNamesystemMBean + @Metric({"NumInServiceLiveDataNodes", + "Number of live datanodes which are currently in service"}) + public int getNumInServiceLiveDataNodes() { + final List live = new ArrayList(); + getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); + int liveInService = live.size(); + for (DatanodeDescriptor node : live) { + liveInService -= node.isInMaintenance() ? 
1 : 0; + } + return liveInService; + } + @Override // FSNamesystemMBean @Metric({"VolumeFailuresTotal", "Total number of volume failures across all Datanodes"}) @@ -6153,8 +6183,10 @@ void loadSecretManagerStateCompat(DataInput in) throws IOException { void loadSecretManagerState(SecretManagerSection s, List keys, - List tokens) throws IOException { - dtSecretManager.loadSecretManagerState(new SecretManagerState(s, keys, tokens)); + List tokens, + StartupProgress.Counter counter) throws IOException { + dtSecretManager.loadSecretManagerState(new SecretManagerState(s, keys, tokens), + counter); } /** @@ -6239,13 +6271,19 @@ boolean isExternalInvocation() { private static UserGroupInformation getRemoteUser() throws IOException { return NameNode.getRemoteUser(); } - + /** - * Log fsck event in the audit log + * Log fsck event in the audit log. + * + * @param succeeded Whether authorization succeeded. + * @param src Path of affected source file. + * @param remoteAddress Remote address of the request. + * @throws IOException if {@link #getRemoteUser()} fails. */ - void logFsckEvent(String src, InetAddress remoteAddress) throws IOException { + void logFsckEvent(boolean succeeded, String src, InetAddress remoteAddress) + throws IOException { if (isAuditEnabled()) { - logAuditEvent(true, getRemoteUser(), + logAuditEvent(succeeded, getRemoteUser(), remoteAddress, "fsck", src, null, null); } @@ -8058,7 +8096,6 @@ boolean disableErasureCodingPolicy(String ecPolicyName, checkOperation(OperationCategory.WRITE); checkErasureCodingSupported(operationName); boolean success = false; - LOG.info("Disable the erasure coding policy " + ecPolicyName); try { writeLock(); try { @@ -8472,7 +8509,7 @@ public void logAuditMessage(String message) { } } - private static void enableAsyncAuditLog() { + private static void enableAsyncAuditLog(Configuration conf) { if (!(auditLog instanceof Log4JLogger)) { LOG.warn("Log4j is required to enable async auditlog"); return; @@ -8483,6 +8520,14 @@ private static void enableAsyncAuditLog() { // failsafe against trying to async it more than once if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) { AsyncAppender asyncAppender = new AsyncAppender(); + asyncAppender.setBlocking(conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY, + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_DEFAULT + )); + asyncAppender.setBufferSize(conf.getInt( + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY, + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT + )); // change logger to have an async appender containing all the // previously configured appenders for (Appender appender : appenders) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java index b87117c2318e7..6502c4c2b4dec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java @@ -25,7 +25,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.Server; import 
org.apache.hadoop.log.LogThrottlingHelper; @@ -108,6 +108,16 @@ public Long initialValue() { private final AtomicReference longestReadLockHeldInfo = new AtomicReference<>(new LockHeldInfo(0, 0, null)); private LockHeldInfo longestWriteLockHeldInfo = new LockHeldInfo(0, 0, null); + /** + * The number of times the read lock + * has been held longer than the threshold. + */ + private final AtomicLong numReadLockLongHold = new AtomicLong(0); + /** + * The number of times the write lock + * has been held for longer than the threshold. + */ + private final AtomicLong numWriteLockLongHold = new AtomicLong(0); @VisibleForTesting static final String OP_NAME_OTHER = "OTHER"; @@ -176,6 +186,7 @@ public void readUnlock(String opName) { final long readLockIntervalMs = TimeUnit.NANOSECONDS.toMillis(readLockIntervalNanos); if (needReport && readLockIntervalMs >= this.readLockReportingThresholdMs) { + numReadLockLongHold.incrementAndGet(); LockHeldInfo localLockHeldInfo; do { localLockHeldInfo = longestReadLockHeldInfo.get(); @@ -253,6 +264,7 @@ public void writeUnlock(String opName, boolean suppressWriteLockReport) { LogAction logAction = LogThrottlingHelper.DO_NOT_LOG; if (needReport && writeLockIntervalMs >= this.writeLockReportingThresholdMs) { + numWriteLockLongHold.incrementAndGet(); if (longestWriteLockHeldInfo.getIntervalMs() < writeLockIntervalMs) { longestWriteLockHeldInfo = new LockHeldInfo(currentTimeMs, writeLockIntervalMs, @@ -302,6 +314,28 @@ public Condition newWriteLockCondition() { return coarseLock.writeLock().newCondition(); } + /** + * Returns the number of times the read lock + * has been held longer than the threshold. + * + * @return long - Number of times the read lock + * has been held longer than the threshold + */ + public long getNumOfReadLockLongHold() { + return numReadLockLongHold.get(); + } + + /** + * Returns the number of times the write lock + * has been held longer than the threshold. + * + * @return long - Number of times the write lock + * has been held longer than the threshold. + */ + public long getNumOfWriteLockLongHold() { + return numWriteLockLongHold.get(); + } + /** * Returns the QueueLength of waiting threads.
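The FSNamesystemLock additions above count how often the read or write lock was held past the configured reporting threshold, using AtomicLong counters that the new FSNamesystem metrics expose. A compact sketch of the same idea for the write lock only; the class name, the fixed threshold, and the fact that lock reentrancy is ignored are simplifications, not the HDFS implementation:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class InstrumentedLock {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
  private final long thresholdMs;
  private final AtomicLong numWriteLockLongHold = new AtomicLong(0);
  private final ThreadLocal<Long> writeLockedAtNanos = new ThreadLocal<>();

  InstrumentedLock(long thresholdMs) {
    this.thresholdMs = thresholdMs;
  }

  void writeLock() {
    lock.writeLock().lock();
    writeLockedAtNanos.set(System.nanoTime());   // reentrancy ignored for brevity
  }

  void writeUnlock() {
    long heldMs = TimeUnit.NANOSECONDS.toMillis(
        System.nanoTime() - writeLockedAtNanos.get());
    lock.writeLock().unlock();
    if (heldMs >= thresholdMs) {
      numWriteLockLongHold.incrementAndGet();    // exposed as a COUNTER metric
    }
  }

  long getNumOfWriteLockLongHold() {
    return numWriteLockLongHold.get();
  }
}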
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index c4fd6a6d3a49a..a539bf6f17627 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -19,11 +19,12 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Stack; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.ipc.CallerContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,11 +90,16 @@ private String toAccessControlString(INodeAttributes inodeAttrib, private static ThreadLocal operationType = new ThreadLocal<>(); - protected FSPermissionChecker(String fsOwner, String supergroup, UserGroupInformation callerUgi, INodeAttributeProvider attributeProvider) { - boolean useNewAuthorizationWithContextAPI; + this(fsOwner, supergroup, callerUgi, attributeProvider, false); + } + + protected FSPermissionChecker(String fsOwner, String supergroup, + UserGroupInformation callerUgi, + INodeAttributeProvider attributeProvider, + boolean useAuthorizationWithContextAPI) { this.fsOwner = fsOwner; this.supergroup = supergroup; this.callerUgi = callerUgi; @@ -102,36 +108,15 @@ protected FSPermissionChecker(String fsOwner, String supergroup, isSuper = user.equals(fsOwner) || groups.contains(supergroup); this.attributeProvider = attributeProvider; - // If the AccessControlEnforcer supports context enrichment, call - // the new API. Otherwise choose the old API. - Class[] cArg = new Class[1]; - cArg[0] = INodeAttributeProvider.AuthorizationContext.class; - - AccessControlEnforcer ace; if (attributeProvider == null) { // If attribute provider is null, use FSPermissionChecker default // implementation to authorize, which supports authorization with context. - useNewAuthorizationWithContextAPI = true; - LOG.info("Default authorization provider supports the new authorization" + + authorizeWithContext = true; + LOG.debug("Default authorization provider supports the new authorization" + " provider API"); } else { - ace = attributeProvider.getExternalAccessControlEnforcer(this); - // if the runtime external authorization provider doesn't support - // checkPermissionWithContext(), fall back to the old API - // checkPermission(). 
- try { - Class clazz = ace.getClass(); - clazz.getDeclaredMethod("checkPermissionWithContext", cArg); - useNewAuthorizationWithContextAPI = true; - LOG.info("Use the new authorization provider API"); - } catch (NoSuchMethodException e) { - useNewAuthorizationWithContextAPI = false; - LOG.info("Fallback to the old authorization provider API because " + - "the expected method is not found."); - } + authorizeWithContext = useAuthorizationWithContextAPI; } - - authorizeWithContext = useNewAuthorizationWithContextAPI; } public static void setOperationType(String opType) { @@ -223,7 +208,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length]; final byte[][] components = inodesInPath.getPathComponents(); for (int i = 0; i < inodes.length && inodes[i] != null; i++) { - inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId); + inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId); } String path = inodesInPath.getPath(); @@ -273,8 +258,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, void checkPermission(INode inode, int snapshotId, FsAction access) throws AccessControlException { byte[][] pathComponents = inode.getPathComponents(); - INodeAttributes nodeAttributes = getINodeAttrs(pathComponents, - pathComponents.length - 1, inode, snapshotId); + INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId); try { INodeAttributes[] iNodeAttr = {nodeAttributes}; AccessControlEnforcer enforcer = getAccessControlEnforcer(); @@ -383,23 +367,31 @@ public void checkPermissionWithContext( authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir()); } - private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx, - INode inode, int snapshotId) { + private INodeAttributes getINodeAttrs(INode inode, int snapshotId) { INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId); + /** + * This logic is similar to {@link FSDirectory#getAttributes()} and it + * ensures that the attribute provider sees snapshot paths resolved to their + * original location. This means the attributeProvider can apply permissions + * to the snapshot paths in the same way as the live paths. See HDFS-15372. + */ if (getAttributesProvider() != null) { - String[] elements = new String[pathIdx + 1]; /** - * {@link INode#getPathComponents(String)} returns a null component - * for the root only path "/". Assign an empty string if so. + * If we have an inode representing a path like /d/.snapshot/snap1 + * then calling inode.getPathComponents returns [null, d, snap1]. If we + * call inode.getFullPathName() it will return /d/.snapshot/snap1. For + * this special path (snapshot root) the attribute provider should see: + * + * [null, d, .snapshot/snap1] + * + * Using IIP.resolveFromRoot, it will take the inode fullPathName and + * construct an IIP object that gives the correct components as above.
*/ - if (pathByNameArr.length == 1 && pathByNameArr[0] == null) { - elements[0] = ""; - } else { - for (int i = 0; i < elements.length; i++) { - elements[i] = DFSUtil.bytes2String(pathByNameArr[i]); - } - } - inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs); + INodesInPath iip = INodesInPath.resolveFromRoot(inode); + byte[][] components = iip.getPathComponents(); + components = Arrays.copyOfRange(components, 1, components.length); + inodeAttrs = getAttributesProvider() + .getAttributes(components, inodeAttrs); } return inodeAttrs; } @@ -455,7 +447,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (!(cList.isEmpty() && ignoreEmptyDir)) { //TODO have to figure this out with inodeattribute provider INodeAttributes inodeAttr = - getINodeAttrs(components, pathIdx, d, snapshotId); + getINodeAttrs(d, snapshotId); if (!hasPermission(inodeAttr, access)) { throw new AccessControlException( toAccessControlString(inodeAttr, d.getFullPathName(), access)); @@ -473,7 +465,7 @@ private void checkSubAccess(byte[][] components, int pathIdx, if (inodeAttr.getFsPermission().getStickyBit()) { for (INode child : cList) { INodeAttributes childInodeAttr = - getINodeAttrs(components, pathIdx, child, snapshotId); + getINodeAttrs(child, snapshotId); if (isStickyBitViolated(inodeAttr, childInodeAttr)) { List allComponentList = new ArrayList<>(); for (int i = 0; i <= pathIdx; ++i) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java index 2acbda4005b57..a90dc27a54fa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * FSTreeTraverser traverse directory recursively and process files diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 78394ab1520e5..9b6f82f088450 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -45,11 +45,11 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.io.nativeio.NativeIO; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; /** * Journal manager for the common case of edits files being written diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java index 5fae9cd48901b..e5c02e81dccfc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java @@ -55,21 +55,25 @@ public void doGet(HttpServletRequest request, HttpServletResponse response final UserGroupInformation ugi = getUGI(request, conf); try { - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); - - final FSNamesystem namesystem = nn.getNamesystem(); - final BlockManager bm = namesystem.getBlockManager(); - final int totalDatanodes = - namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); - new NamenodeFsck(conf, nn, - bm.getDatanodeManager().getNetworkTopology(), pmap, out, - totalDatanodes, remoteAddress).fsck(); - - return null; + ugi.doAs((PrivilegedExceptionAction) () -> { + NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); + + final FSNamesystem namesystem = nn.getNamesystem(); + final BlockManager bm = namesystem.getBlockManager(); + final int totalDatanodes = + namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); + NamenodeFsck fsck = new NamenodeFsck(conf, nn, + bm.getDatanodeManager().getNetworkTopology(), pmap, out, + totalDatanodes, remoteAddress); + String auditSource = fsck.getAuditSource(); + boolean success = false; + try { + fsck.fsck(); + success = true; + } finally { + namesystem.logFsckEvent(success, auditSource, remoteAddress); } + return null; }); } catch (InterruptedException e) { response.sendError(400, e.getMessage()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 6b29b33f3fd0c..03f01eb32eef1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -17,27 +17,21 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.PrintStream; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.List; -import java.util.Map; - -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature; -import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName; @@ -46,9 +40,14 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.List; +import java.util.Map; /** * We keep an in-memory representation of the file/block hierarchy. @@ -225,6 +224,27 @@ public INodeAttributes getSnapshotINode(final int snapshotId) { return this; } + /** Is this inode in the current state? */ + public boolean isInCurrentState() { + if (isRoot()) { + return true; + } + final INodeDirectory parentDir = getParent(); + if (parentDir == null) { + return false; // this inode is only referenced in snapshots + } + if (!parentDir.isInCurrentState()) { + return false; + } + final INode child = parentDir.getChild(getLocalNameBytes(), + Snapshot.CURRENT_STATE_ID); + if (this == child) { + return true; + } + return child != null && child.isReference() && + this.equals(child.asReference().getReferredINode()); + } + /** Is this inode in the latest snapshot? */ public final boolean isInLatestSnapshot(final int latestSnapshotId) { if (latestSnapshotId == Snapshot.CURRENT_STATE_ID || @@ -234,6 +254,8 @@ public final boolean isInLatestSnapshot(final int latestSnapshotId) { // if parent is a reference node, parent must be a renamed node. We can // stop the check at the reference node. if (parent != null && parent.isReference()) { + // TODO: Is it a bug to return true? + // Some ancestor nodes may not be in the latest snapshot. 
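The new INode.isInCurrentState() above walks up the parent chain and checks that every ancestor still links to this node in the current (non-snapshot) view. A simplified sketch of that walk; Node is a stand-in interface, and the INodeReference handling of the real method is omitted:

final class TreeMembership {
  interface Node {
    boolean isRoot();
    Node getParent();
    /** The child registered under this name in the current state, or null. */
    Node getLiveChild(String name);
    String getLocalName();
  }

  static boolean isInCurrentState(Node node) {
    if (node.isRoot()) {
      return true;
    }
    Node parent = node.getParent();
    if (parent == null) {
      return false;                       // only referenced from snapshots
    }
    if (!isInCurrentState(parent)) {
      return false;                       // an ancestor exists only in snapshots
    }
    return node.equals(parent.getLiveChild(node.getLocalName()));
  }
}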
return true; } final INodeDirectory parentDir = getParent(); @@ -806,7 +828,7 @@ static boolean isValidAbsolutePath(final String path){ return path != null && path.startsWith(Path.SEPARATOR); } - private static void checkAbsolutePath(final String path) { + static void checkAbsolutePath(final String path) { if (!isValidAbsolutePath(path)) { throw new AssertionError("Absolute path required, but got '" + path + "'"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java index 63c5b46b2fbf0..e83c962a4a845 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 3070957e3deeb..657db8b7974a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ReadOnlyList; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.security.AccessControlException; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; @@ -544,7 +544,7 @@ public boolean removeChild(final INode child) { } final INode removed = children.remove(i); - Preconditions.checkState(removed == child); + Preconditions.checkState(removed.equals(child)); return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java index 240aa15ee5fb5..5e5c4b4b81fb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.util.EnumCounters; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The attributes of an inode. 
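checkAbsolutePath() above is only widened from private to package-private so that the earlier FSNamesystem.listOpenFiles() hunk can validate its path argument before doing any work. A self-contained sketch of that guard, using a literal "/" in place of Path.SEPARATOR:

final class PathGuard {
  static boolean isValidAbsolutePath(String path) {
    return path != null && path.startsWith("/");
  }

  /** Fails fast before any further work is attempted on a relative path. */
  static void checkAbsolutePath(String path) {
    if (!isValidAbsolutePath(path)) {
      throw new AssertionError(
          "Absolute path required, but got '" + path + "'");
    }
  }

  public static void main(String[] args) {
    checkAbsolutePath("/user/alice/file");           // accepted
    try {
      checkAbsolutePath("relative/path");            // rejected
    } catch (AssertionError expected) {
      System.out.println(expected.getMessage());
    }
  }
}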
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 67c86b375780d..beb71e3a3098d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -57,8 +57,8 @@ import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** I-node for closed file. */ @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java index bc273d28d7f99..f35949fdcdbed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java @@ -26,7 +26,7 @@ import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.LightWeightGSet; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Storing all the {@link INode}s and maintaining the mapping between INode ID diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index 8de0ed6d5de22..ce37f0afa2dbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.security.AccessControlException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java index b7d2f2c1e5abc..cd3f842d2641b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.util.LongBitFormat; import org.apache.hadoop.util.LightWeightGSet.LinkedElement; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * {@link INode} with additional fields including id, name, permission, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index f072220677733..8a150f0630f6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.ID_INTEGER_COMPARATOR; @@ -135,6 +135,27 @@ static INodesInPath resolve(final INodeDirectory startingDir, return resolve(startingDir, components, false); } + /** + * Retrieves the existing INodes from a path, starting at the root directory. + * The root directory is located by following the parent link in the inode + * recursively until the final root inode is found. + * The inodes returned will depend upon the output of inode.getFullPathName(). + * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to: + * [null, data, .snapshot/snap1] + * For a file in the snapshot, as inode.getFullPathName resolves the snapshot + * information, the returned inodes for a path like /data/.snapshot/snap1/d1 + * would be: + * [null, data, d1] + * @param inode the {@link INode} to be resolved + * @return INodesInPath + */ + static INodesInPath resolveFromRoot(INode inode) { + INode[] inodes = getINodes(inode); + byte[][] paths = INode.getPathComponents(inode.getFullPathName()); + INodeDirectory rootDir = inodes[0].asDirectory(); + return resolve(rootDir, paths); + } + static INodesInPath resolve(final INodeDirectory startingDir, byte[][] components, final boolean isRaw) { Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java index 91f24dd1137f2..54825d8a8a261 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.security.SecurityUtil; +import org.eclipse.jetty.server.Response; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -63,8 +64,8 @@ import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.StringUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * This class is used in Namesystem's jetty to retrieve/upload a file @@ -99,12 +100,38 @@ public class ImageServlet extends HttpServlet { "recent.image.check.enabled"; public static final boolean RECENT_IMAGE_CHECK_ENABLED_DEFAULT = true; + /* + * Specify a relaxation for the time delta check, the relaxation 
is to account + * for the scenario that there are chances that minor time difference (e.g. + * due to image upload delay, or minor machine clock skew) can cause ANN to + * reject a fsImage too aggressively. + */ + private static double recentImageCheckTimePrecision = 0.75; + + @VisibleForTesting + static void setRecentImageCheckTimePrecision(double ratio) { + recentImageCheckTimePrecision = ratio; + } + + private FSImage getAndValidateFSImage(ServletContext context, + final HttpServletResponse response) + throws IOException { + final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context); + if (nnImage == null) { + String errorMsg = "NameNode initialization not yet complete. " + + "FSImage has not been set in the NameNode."; + sendError(response, HttpServletResponse.SC_FORBIDDEN, errorMsg); + throw new IOException(errorMsg); + } + return nnImage; + } + @Override public void doGet(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException { try { final ServletContext context = getServletContext(); - final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context); + final FSImage nnImage = getAndValidateFSImage(context, response); final GetImageParams parsedParams = new GetImageParams(request, response); final Configuration conf = (Configuration) context .getAttribute(JspHelper.CURRENT_CONF); @@ -192,7 +219,7 @@ private void serveFile(File file) throws IOException { } catch (Throwable t) { String errMsg = "GetImage failed. " + StringUtils.stringifyException(t); - response.sendError(HttpServletResponse.SC_GONE, errMsg); + sendError(response, HttpServletResponse.SC_GONE, errMsg); throw new IOException(errMsg); } finally { response.getOutputStream().close(); @@ -208,7 +235,7 @@ private void validateRequest(ServletContext context, Configuration conf, conf)) { String errorMsg = "Only Namenode, Secondary Namenode, and administrators may access " + "this servlet"; - response.sendError(HttpServletResponse.SC_FORBIDDEN, errorMsg); + sendError(response, HttpServletResponse.SC_FORBIDDEN, errorMsg); LOG.warn("Received non-NN/SNN/administrator request for image or edits from " + request.getUserPrincipal().getName() + " at " @@ -221,7 +248,7 @@ private void validateRequest(ServletContext context, Configuration conf, && !myStorageInfoString.equals(theirStorageInfoString)) { String errorMsg = "This namenode has storage info " + myStorageInfoString + " but the secondary expected " + theirStorageInfoString; - response.sendError(HttpServletResponse.SC_FORBIDDEN, errorMsg); + sendError(response, HttpServletResponse.SC_FORBIDDEN, errorMsg); LOG.warn("Received an invalid request file transfer request " + "from a secondary with storage info " + theirStorageInfoString); throw new IOException(errorMsg); @@ -511,7 +538,7 @@ protected void doPut(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException { try { ServletContext context = getServletContext(); - final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context); + final FSImage nnImage = getAndValidateFSImage(context, response); final Configuration conf = (Configuration) getServletContext() .getAttribute(JspHelper.CURRENT_CONF); final PutImageParams parsedParams = new PutImageParams(request, response, @@ -552,7 +579,7 @@ public Void run() throws Exception { // we need a different response type here so the client can differentiate this // from the failure to upload due to (1) security, or (2) other checkpoints already // present - 
response.sendError(HttpServletResponse.SC_EXPECTATION_FAILED, + sendError(response, HttpServletResponse.SC_EXPECTATION_FAILED, "Nameode "+request.getLocalAddr()+" is currently not in a state which can " + "accept uploads of new fsimages. State: "+state); return null; @@ -567,7 +594,7 @@ public Void run() throws Exception { // if the node is attempting to upload an older transaction, we ignore it SortedSet larger = currentlyDownloadingCheckpoints.tailSet(imageRequest); if (larger.size() > 0) { - response.sendError(HttpServletResponse.SC_CONFLICT, + sendError(response, HttpServletResponse.SC_CONFLICT, "Another checkpointer is already in the process of uploading a" + " checkpoint made up to transaction ID " + larger.last()); return null; @@ -575,7 +602,7 @@ public Void run() throws Exception { //make sure no one else has started uploading one if (!currentlyDownloadingCheckpoints.add(imageRequest)) { - response.sendError(HttpServletResponse.SC_CONFLICT, + sendError(response, HttpServletResponse.SC_CONFLICT, "Either current namenode is checkpointing or another" + " checkpointer is already in the process of " + "uploading a checkpoint made at transaction ID " @@ -592,6 +619,9 @@ public Void run() throws Exception { long checkpointPeriod = conf.getTimeDuration(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT, TimeUnit.SECONDS); + checkpointPeriod = Math.round( + checkpointPeriod * recentImageCheckTimePrecision); + long checkpointTxnCount = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT); @@ -612,21 +642,24 @@ public Void run() throws Exception { // a new fsImage // 1. most recent image's txid is too far behind // 2. last checkpoint time was too old - response.sendError(HttpServletResponse.SC_CONFLICT, - "Most recent checkpoint is neither too far behind in " - + "txid, nor too old. New txnid cnt is " - + (txid - lastCheckpointTxid) - + ", expecting at least " + checkpointTxnCount - + " unless too long since last upload."); + String message = "Rejecting a fsimage due to small time delta " + + "and txnid delta. Time since previous checkpoint is " + + timeDelta + " expecting at least " + checkpointPeriod + + " txnid delta since previous checkpoint is " + + (txid - lastCheckpointTxid) + " expecting at least " + + checkpointTxnCount; + LOG.info(message); + sendError(response, HttpServletResponse.SC_CONFLICT, message); return null; } try { if (nnImage.getStorage().findImageFile(nnf, txid) != null) { - response.sendError(HttpServletResponse.SC_CONFLICT, - "Either current namenode has checkpointed or " - + "another checkpointer already uploaded an " - + "checkpoint for txid " + txid); + String message = "Either current namenode has checkpointed or " + + "another checkpointer already uploaded an " + + "checkpoint for txid " + txid; + LOG.info(message); + sendError(response, HttpServletResponse.SC_CONFLICT, message); return null; } @@ -663,11 +696,20 @@ public Void run() throws Exception { }); } catch (Throwable t) { String errMsg = "PutImage failed. 
" + StringUtils.stringifyException(t); - response.sendError(HttpServletResponse.SC_GONE, errMsg); + sendError(response, HttpServletResponse.SC_GONE, errMsg); throw new IOException(errMsg); } } + private void sendError(HttpServletResponse response, int code, String message) + throws IOException { + if (response instanceof Response) { + ((Response)response).setStatusWithReason(code, message); + } + + response.sendError(code, message); + } + /* * Params required to handle put image request */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java index 8a54c8a775945..ba40da4d843b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.inotify.Event; import org.apache.hadoop.hdfs.inotify.EventBatch; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index a5df9f53b9d97..e2b7b1c68bad4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -24,8 +24,10 @@ import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.PriorityQueue; import java.util.SortedSet; import java.util.concurrent.CopyOnWriteArrayList; @@ -38,14 +40,10 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableListMultimap; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimaps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Manages a collection of Journals. 
None of the methods are synchronized, it is @@ -634,7 +632,7 @@ public void apply(JournalAndStream jas) throws IOException { */ public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) { // Collect RemoteEditLogs available from each FileJournalManager - List allLogs = Lists.newArrayList(); + List allLogs = new ArrayList<>(); for (JournalAndStream j : journals) { if (j.getManager() instanceof FileJournalManager) { FileJournalManager fjm = (FileJournalManager)j.getManager(); @@ -645,15 +643,17 @@ public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) { } } } - // Group logs by their starting txid - ImmutableListMultimap logsByStartTxId = - Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID); + final Map> logsByStartTxId = new HashMap<>(); + allLogs.forEach(input -> { + long key = RemoteEditLog.GET_START_TXID.apply(input); + logsByStartTxId.computeIfAbsent(key, k-> new ArrayList<>()).add(input); + }); long curStartTxId = fromTxId; - - List logs = Lists.newArrayList(); + List logs = new ArrayList<>(); while (true) { - ImmutableList logGroup = logsByStartTxId.get(curStartTxId); + List logGroup = + logsByStartTxId.getOrDefault(curStartTxId, Collections.emptyList()); if (logGroup.isEmpty()) { // we have a gap in logs - for example because we recovered some old // storage directory with ancient logs. Clear out any logs we've diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index c3624ce303385..f6f240dae1f3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -23,21 +23,19 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.NavigableSet; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; -import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; @@ -52,8 +50,8 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.util.Daemon; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -92,21 +90,11 @@ public class LeaseManager { private long lastHolderUpdateTime; private String internalLeaseHolder; + // // Used for handling lock-leases // Mapping: leaseHolder -> Lease - private final SortedMap leases = new TreeMap<>(); - // Set of: Lease - private final NavigableSet sortedLeases = new TreeSet<>( - new Comparator() { - @Override - public int compare(Lease o1, Lease o2) { - if (o1.getLastUpdate() != o2.getLastUpdate()) { - return 
Long.signum(o1.getLastUpdate() - o2.getLastUpdate()); - } else { - return o1.holder.compareTo(o2.holder); - } - } - }); + // + private final HashMap leases = new HashMap<>(); // INodeID -> Lease private final TreeMap leasesById = new TreeMap<>(); @@ -344,7 +332,7 @@ public BatchedListEntries getUnderConstructionFiles( /** @return the number of leases currently in the system */ @VisibleForTesting public synchronized int countLease() { - return sortedLeases.size(); + return leases.size(); } /** @return the number of paths contained in all leases */ @@ -360,7 +348,6 @@ synchronized Lease addLease(String holder, long inodeId) { if (lease == null) { lease = new Lease(holder); leases.put(holder, lease); - sortedLeases.add(lease); } else { renewLease(lease); } @@ -386,9 +373,8 @@ private synchronized void removeLease(Lease lease, long inodeId) { } if (!lease.hasFiles()) { - leases.remove(lease.holder); - if (!sortedLeases.remove(lease)) { - LOG.error("{} not found in sortedLeases", lease); + if (leases.remove(lease.holder) == null) { + LOG.error("{} not found", lease); } } } @@ -407,7 +393,6 @@ synchronized void removeLease(String holder, INodeFile src) { } synchronized void removeAllLeases() { - sortedLeases.clear(); leasesById.clear(); leases.clear(); } @@ -430,11 +415,10 @@ synchronized Lease reassignLease(Lease lease, INodeFile src, synchronized void renewLease(String holder) { renewLease(getLease(holder)); } + synchronized void renewLease(Lease lease) { if (lease != null) { - sortedLeases.remove(lease); lease.renew(); - sortedLeases.add(lease); } } @@ -458,10 +442,10 @@ class Lease { private final String holder; private long lastUpdate; private final HashSet files = new HashSet<>(); - + /** Only LeaseManager object can create a lease */ - private Lease(String holder) { - this.holder = holder; + private Lease(String h) { + this.holder = h; renew(); } /** Only LeaseManager object can renew a lease */ @@ -474,6 +458,10 @@ public boolean expiredHardLimit() { return monotonicNow() - lastUpdate > hardLimit; } + public boolean expiredHardLimit(long now) { + return now - lastUpdate > hardLimit; + } + /** @return true if the Soft Limit Timer has expired */ public boolean expiredSoftLimit() { return monotonicNow() - lastUpdate > softLimit; @@ -496,7 +484,7 @@ public String toString() { public int hashCode() { return holder.hashCode(); } - + private Collection getFiles() { return Collections.unmodifiableCollection(files); } @@ -515,6 +503,17 @@ public void setLeasePeriod(long softLimit, long hardLimit) { this.softLimit = softLimit; this.hardLimit = hardLimit; } + + private synchronized Collection getExpiredCandidateLeases() { + final long now = Time.monotonicNow(); + Collection expired = new HashSet<>(); + for (Lease lease : leases.values()) { + if (lease.expiredHardLimit(now)) { + expired.add(lease); + } + } + return expired; + } /****************************************************** * Monitor checks for leases that have expired, @@ -529,10 +528,19 @@ public void run() { for(; shouldRunMonitor && fsnamesystem.isRunning(); ) { boolean needSync = false; try { + // sleep now to avoid infinite loop if an exception was thrown. + Thread.sleep(fsnamesystem.getLeaseRecheckIntervalMs()); + + // pre-filter the leases w/o the fsn lock. 
+ Collection candidates = getExpiredCandidateLeases(); + if (candidates.isEmpty()) { + continue; + } + fsnamesystem.writeLockInterruptibly(); try { if (!fsnamesystem.isInSafeMode()) { - needSync = checkLeases(); + needSync = checkLeases(candidates); } } finally { fsnamesystem.writeUnlock("leaseManager"); @@ -541,8 +549,6 @@ public void run() { fsnamesystem.getEditLog().logSync(); } } - - Thread.sleep(fsnamesystem.getLeaseRecheckIntervalMs()); } catch(InterruptedException ie) { LOG.debug("{} is interrupted", name, ie); } catch(Throwable e) { @@ -557,17 +563,22 @@ public void run() { */ @VisibleForTesting synchronized boolean checkLeases() { + return checkLeases(getExpiredCandidateLeases()); + } + + private synchronized boolean checkLeases(Collection leasesToCheck) { boolean needSync = false; assert fsnamesystem.hasWriteLock(); long start = monotonicNow(); - - while(!sortedLeases.isEmpty() && - sortedLeases.first().expiredHardLimit() - && !isMaxLockHoldToReleaseLease(start)) { - Lease leaseToCheck = sortedLeases.first(); + for (Lease leaseToCheck : leasesToCheck) { + if (isMaxLockHoldToReleaseLease(start)) { + break; + } + if (!leaseToCheck.expiredHardLimit(Time.monotonicNow())) { + continue; + } LOG.info("{} has expired hard limit", leaseToCheck); - final List removing = new ArrayList<>(); // need to create a copy of the oldest lease files, because // internalReleaseLease() removes files corresponding to empty files, @@ -629,7 +640,6 @@ synchronized boolean checkLeases() { removeLease(leaseToCheck, id); } } - return needSync; } @@ -644,7 +654,6 @@ private boolean isMaxLockHoldToReleaseLease(long start) { public synchronized String toString() { return getClass().getSimpleName() + "= {" + "\n leases=" + leases - + "\n sortedLeases=" + sortedLeases + "\n leasesById=" + leasesById + "\n}"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index 98ae44ede937d..db08ac200b670 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -57,9 +57,9 @@ import org.apache.hadoop.util.Time; import org.eclipse.jetty.util.ajax.JSON; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * NNStorage is responsible for management of the StorageDirectories used by diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java index aaaaa7210c13b..22be54e5576d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java @@ -37,10 +37,10 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.util.MD5FileUtils; -import com.google.common.base.Preconditions; 
-import com.google.common.collect.ComparisonChain; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * The NNStorageRetentionManager is responsible for inspecting the storage diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java index 9cca97ab74b32..8086b60637dae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; public abstract class NNUpgradeUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 74757e563a64d..1e70a45ea738a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.util.Set; import org.apache.commons.logging.Log; @@ -124,6 +124,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT; @@ -326,7 +328,8 @@ public enum OperationCategory { DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, - DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)); + DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, + DFS_IMAGE_PARALLEL_LOAD_KEY)); 
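A minimal sketch (not part of this patch) of how the newly reconfigurable dfs.image.parallel.load key could be flipped at runtime once it is listed among the reconfigurable properties above; the test-style wrapper class, its method name, and the MiniDFSCluster variable are assumptions for illustration only:

    import org.apache.hadoop.conf.ReconfigurationException;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    class ParallelLoadReconfigSketch {
      static void toggleParallelLoad(MiniDFSCluster cluster, boolean enable)
          throws ReconfigurationException {
        NameNode nn = cluster.getNameNode();
        // reconfigureProperty() is inherited from ReconfigurableBase and dispatches
        // to reconfigurePropertyImpl(), which (per the hunks below) now recognizes
        // DFS_IMAGE_PARALLEL_LOAD_KEY and refreshes the parallel save/load setting.
        nn.reconfigureProperty(DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY,
            Boolean.toString(enable));
      }
    }
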
private static final String USAGE = "Usage: hdfs namenode [" + StartupOption.BACKUP.getName() + "] | \n\t[" @@ -384,6 +387,7 @@ public long getProtocolVersion(String protocol, */ @Deprecated public static final int DEFAULT_PORT = DFS_NAMENODE_RPC_PORT_DEFAULT; + public static final String FS_HDFS_IMPL_KEY = "fs.hdfs.impl"; public static final Logger LOG = LoggerFactory.getLogger(NameNode.class.getName()); public static final Logger stateChangeLog = @@ -1245,8 +1249,9 @@ private static boolean format(Configuration conf, boolean force, LOG.info("Formatting using clusterid: {}", clusterId); FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat); + FSNamesystem fsn = null; try { - FSNamesystem fsn = new FSNamesystem(conf, fsImage); + fsn = new FSNamesystem(conf, fsImage); fsImage.getEditLog().initJournalsForWrite(); // Abort NameNode format if reformat is disabled and if @@ -1271,8 +1276,14 @@ private static boolean format(Configuration conf, boolean force, fsImage.format(fsn, clusterId, force); } catch (IOException ioe) { LOG.warn("Encountered exception during format", ioe); - fsImage.close(); throw ioe; + } finally { + if (fsImage != null) { + fsImage.close(); + } + if (fsn != null) { + fsn.close(); + } } return false; } @@ -2187,6 +2198,8 @@ protected String reconfigurePropertyImpl(String property, String newVal) .equals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY)) { reconfBlockPlacementPolicy(); return newVal; + } else if (property.equals(DFS_IMAGE_PARALLEL_LOAD_KEY)) { + return reconfigureParallelLoad(newVal); } else { throw new ReconfigurationException(property, newVal, getConf().get( property)); @@ -2362,6 +2375,17 @@ String reconfigureSPSModeEvent(String newVal, String property) return newVal; } + String reconfigureParallelLoad(String newVal) { + boolean enableParallelLoad; + if (newVal == null) { + enableParallelLoad = DFS_IMAGE_PARALLEL_LOAD_DEFAULT; + } else { + enableParallelLoad = Boolean.parseBoolean(newVal); + } + FSImageFormatProtobuf.refreshParallelSaveAndLoad(enableParallelLoad); + return Boolean.toString(enableParallelLoad); + } + @Override // ReconfigurableBase protected Configuration getNewConf() { return new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 1a3b578510b8f..33913227af2ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -27,7 +27,7 @@ import javax.servlet.ServletContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java index d0245d80351c2..4cac0feffdfa1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java @@ -24,7 +24,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.Map; - +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -33,9 +33,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.Util; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Collections2; -import com.google.common.base.Predicate; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * @@ -116,18 +114,15 @@ public NameNodeResourceChecker(Configuration conf) throws IOException { Collection extraCheckedVolumes = Util.stringCollectionAsURIs(conf .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY)); - - Collection localEditDirs = Collections2.filter( - FSNamesystem.getNamespaceEditsDirs(conf), - new Predicate() { - @Override - public boolean apply(URI input) { - if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { - return true; - } - return false; - } - }); + + Collection localEditDirs = + FSNamesystem.getNamespaceEditsDirs(conf).stream().filter( + input -> { + if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { + return true; + } + return false; + }).collect(Collectors.toList()); // Add all the local edits dirs, marking some as required if they are // configured as such. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index fcbd457d7a5e8..1dc53831ac353 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -45,7 +45,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -184,7 +184,7 @@ import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.RetryCache; @@ -226,8 +226,9 @@ import org.apache.hadoop.util.VersionUtil; import org.slf4j.Logger; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; +import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; import javax.annotation.Nonnull; @@ -281,7 +282,7 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) DFS_NAMENODE_HANDLER_COUNT_DEFAULT); RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ClientNamenodeProtocolServerSideTranslatorPB clientProtocolServerTranslator = @@ -405,7 +406,7 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) InetSocketAddress 
lifelineRpcAddr = nn.getLifelineRpcServerAddress(conf); if (lifelineRpcAddr != null) { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); String bindHost = nn.getLifelineRpcServerBindHost(conf); if (bindHost == null) { bindHost = lifelineRpcAddr.getHostName(); @@ -542,7 +543,8 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) QuotaByStorageTypeExceededException.class, AclException.class, FSLimitException.PathComponentTooLongException.class, - FSLimitException.MaxDirectoryItemsExceededException.class); + FSLimitException.MaxDirectoryItemsExceededException.class, + DisallowedDatanodeException.class); clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class, UnresolvedPathException.class); @@ -662,6 +664,7 @@ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long } checkNNStartup(); namesystem.checkSuperuserPrivilege(); + namesystem.checkNameNodeSafeMode("Cannot execute getBlocks"); return namesystem.getBlocks(datanode, size, minBlockSize); } @@ -1054,6 +1057,8 @@ public boolean rename(String src, String dst) throws IOException { @Override // ClientProtocol public void concat(String trg, String[] src) throws IOException { checkNNStartup(); + stateChangeLog.debug("*DIR* NameNode.concat: src path {} to" + + " target path {}", Arrays.toString(src), trg); namesystem.checkOperation(OperationCategory.WRITE); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { @@ -2259,10 +2264,10 @@ public void setErasureCodingPolicy(String src, String ecPolicyName) try { if (ecPolicyName == null) { ecPolicyName = defaultECPolicyName; - LOG.trace("No policy name is specified, " + + LOG.debug("No policy name is specified, " + "set the default policy name instead"); } - LOG.trace("Set erasure coding policy " + ecPolicyName + " on " + src); + LOG.debug("Set erasure coding policy {} on {}", ecPolicyName, src); namesystem.setErasureCodingPolicy(src, ecPolicyName, cacheEntry != null); success = true; } finally { @@ -2513,6 +2518,7 @@ public void unsetErasureCodingPolicy(String src) throws IOException { } boolean success = false; try { + LOG.debug("Unset erasure coding policy on {}", src); namesystem.unsetErasureCodingPolicy(src, cacheEntry != null); success = true; } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java index ec1d5104922ec..85254cd840942 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java @@ -20,7 +20,7 @@ import javax.annotation.Nullable; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtilClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index b36c11a671549..9b498b2c4e49d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -89,7 +89,7 @@ import org.apache.hadoop.util.Time; import org.apache.htrace.core.Tracer; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class provides rudimentary checking of DFS volumes for errors and @@ -155,6 +155,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { private boolean showMaintenanceState = false; private long staleInterval; private Tracer tracer; + private String auditSource; /** * True if we encountered an internal error during FSCK, such as not being @@ -186,7 +187,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { String path = "/"; - private String blockIds = null; + private String[] blockIds = null; // We return back N files that are corrupt; the list of files returned is // ordered by block id; to allow continuation support, pass in the last block @@ -262,11 +263,17 @@ else if (key.equals("replicadetails")) { } else if (key.equals("includeSnapshots")) { this.snapshottableDirs = new ArrayList(); } else if (key.equals("blockId")) { - this.blockIds = pmap.get("blockId")[0]; + this.blockIds = pmap.get("blockId")[0].split(" "); } else if (key.equals("replicate")) { this.doReplicate = true; } } + this.auditSource = (blockIds != null) + ? "blocksIds=" + Arrays.asList(blockIds) : path; + } + + public String getAuditSource() { + return auditSource; } /** @@ -368,18 +375,18 @@ private void printDatanodeReplicaStatus(Block block, /** * Check files on DFS, starting from the indicated path. 
*/ - public void fsck() { + public void fsck() throws AccessControlException { final long startTime = Time.monotonicNow(); try { if(blockIds != null) { - String[] blocks = blockIds.split(" "); + namenode.getNamesystem().checkSuperuserPrivilege(); StringBuilder sb = new StringBuilder(); sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " at " + new Date()); out.println(sb); sb.append(" for blockIds: \n"); - for (String blk: blocks) { + for (String blk: blockIds) { if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) { out.println("Incorrect blockId format: " + blk); continue; @@ -389,7 +396,6 @@ public void fsck() { sb.append(blk + "\n"); } LOG.info("{}", sb.toString()); - namenode.getNamesystem().logFsckEvent("/", remoteAddress); out.flush(); return; } @@ -398,7 +404,6 @@ public void fsck() { + " from " + remoteAddress + " for path " + path + " at " + new Date(); LOG.info(msg); out.println(msg); - namenode.getNamesystem().logFsckEvent(path, remoteAddress); if (snapshottableDirs != null) { SnapshottableDirectoryStatus[] snapshotDirs = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java index 934c3169fc014..fd98ce9ef6146 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java index bcb21929c8b9f..002972caf63e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.util.ConstEnumCounters; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java index edda691fbbeac..15f799ab215ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; 
+import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.apache.hadoop.log.LogThrottlingHelper; import org.apache.hadoop.log.LogThrottlingHelper.LogAction; @@ -170,6 +170,7 @@ protected FSEditLogOp nextValidOp() { } return nextOp(); } catch (IOException e) { + LOG.warn("encountered an exception", e); return null; } } @@ -228,7 +229,8 @@ protected FSEditLogOp nextOp() throws IOException { "streams are shorter than the current one! The best " + "remaining edit log ends at transaction " + newLast + ", but we thought we could read up to transaction " + - oldLast + ". If you continue, metadata will be lost forever!"); + oldLast + ". If you continue, metadata will be lost forever!", + prevException); } LOG.error("Got error reading edit log input stream " + streams[curIdx].getName() + "; failing over to edit log " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java index fd9cbd752750e..ea38da6021ce8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java index 15cfa9278f6b9..f2d09b0627f3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java index 823385a980667..35a7cd2f643cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.util.Canceler; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Context for an ongoing SaveNamespace operation. This class diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 1423b308012c0..29cfe4280287b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -29,7 +29,7 @@ import java.security.PrivilegedExceptionAction; import java.util.*; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.HelpFormatter; @@ -77,8 +77,8 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.VersionInfo; import javax.management.ObjectName; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 82cdcfe69d9be..7ba6d839bea1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -57,8 +57,8 @@ import org.apache.hadoop.util.Time; import org.apache.http.client.utils.URIBuilder; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.eclipse.jetty.io.EofException; import static org.apache.hadoop.hdfs.server.common.Util.IO_FILE_BUFFER_SIZE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java index a7f08780a63ec..73df932f2fbe5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.XAttrHelper; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Feature for extended attributes. 
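The recurring import change in these files (com.google.common.* to org.apache.hadoop.thirdparty.com.google.common.*) points the NameNode code at the relocated Guava classes bundled in the shaded hadoop-thirdparty artifact. An illustrative fragment, not taken from the patch, of why the relocation matters on a shared classpath; it assumes both an application-provided Guava and the shaded artifact are present:

    // Both classes can be loaded side by side because they live in different packages.
    class ShadedGuavaSketch {
      static void demo(String arg) {
        // Hadoop-internal code now resolves against the relocated copy...
        org.apache.hadoop.thirdparty.com.google.common.base.Preconditions
            .checkNotNull(arg, "arg must not be null");
        // ...so application code may keep whatever unrelocated Guava version it ships.
        com.google.common.base.Preconditions.checkNotNull(arg, "arg must not be null");
      }
    }
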
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java index 5b5992c7a5894..af1025ab457ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java @@ -25,8 +25,8 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.XAttrHelper; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.hdfs.util.LongBitFormat; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index b6f4f64ca517c..92e5ef1a0b86d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -25,8 +25,8 @@ import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.security.AccessControlException; -import com.google.common.collect.Lists; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index 0babaaf0c1bc5..3f273cb5e75e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -70,8 +70,8 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Tool which allows the standby node's storage directories to be bootstrapped diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index d27947b995238..b82fb5b0e41d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -34,8 +34,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.google.common.collect.Iterators; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -58,8 +58,8 @@ import static org.apache.hadoop.util.Time.monotonicNow; import static org.apache.hadoop.util.ExitUtil.terminate; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Time; @@ -298,13 +298,23 @@ public void catchupDuringFailover() throws IOException { SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try { - // It is already under the full name system lock and the checkpointer - // thread is already stopped. No need to acqure any other lock. - doTailEdits(); - } catch (InterruptedException e) { - throw new IOException(e); - } + long editsTailed = 0; + // Fully tail the journal to the end + do { + long startTime = Time.monotonicNow(); + try { + NameNode.getNameNodeMetrics().addEditLogTailInterval( + startTime - lastLoadTimeMs); + // It is already under the name system lock and the checkpointer + // thread is already stopped. No need to acquire any other lock. + editsTailed = doTailEdits(); + } catch (InterruptedException e) { + throw new IOException(e); + } finally { + NameNode.getNameNodeMetrics().addEditLogTailTime( + Time.monotonicNow() - startTime); + } + } while(editsTailed > 0); return null; } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java index 9a51190b17607..1ff251d808c8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java @@ -24,13 +24,13 @@ import java.util.Collections; import java.util.List; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Information about a single remote NameNode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index daa836ac69a5f..1a86f8e82f7dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.util.Time.monotonicNow; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.IOException; import java.net.URI; import java.net.URL; @@ -48,9 +48,9 @@ import org.apache.hadoop.security.SecurityUtil; import 
org.apache.hadoop.security.UserGroupInformation; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java index 7dbddc2d3ae5d..7e5f108167ccc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java @@ -150,6 +150,12 @@ public interface FSNamesystemMBean { */ public int getNumDecomDeadDataNodes(); + /** + * @return Number of in-service data nodes, where NumInServiceDataNodes = + * NumLiveDataNodes - NumDecomLiveDataNodes - NumInMaintenanceLiveDataNodes + */ + int getNumInServiceLiveDataNodes(); + /** * Number of failed data volumes across all live data nodes. * @return number of failed data volumes across all live data nodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java index 2e8620b649593..df052f171afa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The difference of an inode between in two snapshots. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java index 705b8d6937b52..dedc1e49d341f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.snapshot. 
DirectoryWithSnapshotFeature.DirectoryDiff; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java index b38d8bfe8ce06..85e5ae46bef49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java @@ -47,9 +47,9 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * A directory with this feature is a snapshottable directory, where snapshots diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java index 4e756c7268ccf..b9b446707a115 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.*; @@ -739,19 +739,22 @@ public void cleanDirectory(INode.ReclaimContext reclaimContext, // were created before "prior" will be covered by the later // cleanSubtreeRecursively call. if (priorCreated != null) { - if (currentINode.isLastReference() && - currentINode.getDiffs().getLastSnapshotId() == prior) { - // If this is the last reference of the directory inode and it - // can not be accessed in any of the subsequent snapshots i.e, - // this is the latest snapshot diff and if this is the last - // reference, the created list can be - // destroyed. - priorDiff.getChildrenDiff().destroyCreatedList( - reclaimContext, currentINode); - } else { - // we only check the node originally in prior's created list - for (INode cNode : priorDiff.diff.getCreatedUnmodifiable()) { - if (priorCreated.containsKey(cNode)) { + // The nodes in priorCreated must be destroyed if + // (1) this is the last reference, and + // (2) prior is the last snapshot, and + // (3) currentINode is not in the current state. + final boolean destroy = currentINode.isLastReference() + && currentINode.getDiffs().getLastSnapshotId() == prior + && !currentINode.isInCurrentState(); + // we only check the node originally in prior's created list + for (INode cNode : new ArrayList<>(priorDiff. 
+ diff.getCreatedUnmodifiable())) { + if (priorCreated.containsKey(cNode)) { + if (destroy) { + cNode.destroyAndCollectBlocks(reclaimContext); + currentINode.removeChild(cNode); + priorDiff.diff.removeCreated(cNode); + } else { cNode.cleanSubtree(reclaimContext, snapshot, NO_SNAPSHOT_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 5f5cd41166a7e..8d05af284b4e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -35,7 +35,7 @@ import java.util.List; import java.util.Map; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.StorageType; @@ -82,7 +82,7 @@ import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.hdfs.util.EnumCounters; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.ByteString; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index 515f164bd8aac..f13ef611346a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -24,7 +24,7 @@ import java.util.Arrays; import java.util.Comparator; import java.util.Date; - +import java.util.stream.Collectors; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; @@ -38,9 +38,6 @@ import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.hdfs.util.ReadOnlyList; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; -import com.google.common.collect.Lists; import org.apache.hadoop.security.AccessControlException; /** Snapshot of a sub-tree in the namesystem. */ @@ -149,20 +146,14 @@ static Snapshot read(DataInput in, FSImageFormat.Loader loader) static public class Root extends INodeDirectory { Root(INodeDirectory other) { // Always preserve ACL, XAttr. 
- super(other, false, Lists.newArrayList( - Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate() { - - @Override - public boolean apply(Feature input) { - if (AclFeature.class.isInstance(input) + super(other, false, Arrays.asList(other.getFeatures()).stream().filter( + input -> { + if (AclFeature.class.isInstance(input) || XAttrFeature.class.isInstance(input)) { return true; } return false; - } - - })) - .toArray(new Feature[0])); + }).collect(Collectors.toList()).toArray(new Feature[0])); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java index ab6f4158c8417..58dd2cf0a3f04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; -import com.google.common.base.Preconditions; -import com.google.common.primitives.SignedBytes; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.primitives.SignedBytes; import org.apache.hadoop.util.ChunkedArrayList; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java index a7960700e6811..8861a1f0ecbdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.ChunkedArrayList; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java index d60a03822e57d..b43c45854bbfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff; import org.apache.hadoop.hdfs.util.ReadOnlyList; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A helper class defining static methods for reading/writing snapshot related diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index 30b98b8e86421..e85da654ce0d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -35,7 +35,7 @@ import javax.management.ObjectName; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; @@ -57,7 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.metrics2.util.MBeans; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementAttemptedItems.java index df4f0dddb49bb..ec583c710d533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementAttemptedItems.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementAttemptedItems.java @@ -43,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A monitor class for checking whether block storage movements attempt diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java index b3f8de97b4f26..2bd52eb27fb9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java @@ -29,7 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A Class to track the block collection IDs (Inode's ID) for which physical diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java index 4c04b466789be..77675479bd205 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java @@ -57,8 +57,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Setting storagePolicy on a file after the file write will only update the new diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java index 14cf05d4200d2..394ab12d443de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java @@ -31,7 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This manages satisfy storage policy invoked path ids and expose methods to diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java index feba0e4f427e1..93eea6068c2fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java @@ -19,7 +19,7 @@ import java.net.InetAddress; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java index ba820323b20b1..e78e41957d107 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java @@ -19,11 +19,11 @@ import java.util.concurrent.TimeUnit; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * This class is a common place for NNTop configuration. 
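The SnapshotCopy constructor change at the top of this section replaces Guava's Iterables.filter and an anonymous Predicate with a plain Java 8 stream. A minimal, self-contained sketch of the same filter-to-array pattern; the Feature classes here are hypothetical stand-ins, not the HDFS types:

```java
import java.util.Arrays;

public class FeatureFilterSketch {
  interface Feature {}
  static class AclFeature implements Feature {}
  static class XAttrFeature implements Feature {}
  static class OtherFeature implements Feature {}

  public static void main(String[] args) {
    Feature[] features = {new AclFeature(), new OtherFeature(), new XAttrFeature()};
    // Keep only ACL and XAttr features, mirroring the filter in the patch.
    Feature[] kept = Arrays.stream(features)
        .filter(f -> f instanceof AclFeature || f instanceof XAttrFeature)
        .toArray(Feature[]::new);
    System.out.println(kept.length); // prints 2
  }
}
```

Stream.toArray(Feature[]::new) is an equivalent, slightly shorter form of the collect(Collectors.toList()).toArray(new Feature[0]) chain used in the patch; both produce a Feature[] of the filtered elements.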
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java index 72ec9f23446e8..3f6bb13228d55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode.top.metrics; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.namenode.top.TopConf; import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager; import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op; import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User; @@ -144,8 +143,6 @@ public void report(long currTime, String userName, String cmd) { for (RollingWindowManager rollingWindowManager : rollingWindowManagers .values()) { rollingWindowManager.recordMetric(currTime, cmd, userName, 1); - rollingWindowManager.recordMetric(currTime, - TopConf.ALL_CMDS, userName, 1); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java index f927106c344ad..9cf64f42a0afc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java @@ -113,8 +113,8 @@ private int computeBucketIndex(long time) { * as well as atomic fields. */ private class Bucket { - AtomicLong value = new AtomicLong(0); - AtomicLong updateTime = new AtomicLong(0); + private AtomicLong value = new AtomicLong(0); + private AtomicLong updateTime = new AtomicLong(-1); // -1 = never updated. 
/** * Check whether the last time that the bucket was updated is no longer @@ -125,7 +125,7 @@ private class Bucket { */ boolean isStaleNow(long time) { long utime = updateTime.get(); - return time - utime >= windowLenMs; + return (utime == -1) || (time - utime >= windowLenMs); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java index 095294e811890..4e9807399395c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java @@ -17,20 +17,22 @@ */ package org.apache.hadoop.hdfs.server.namenode.top.window; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.Stack; import java.util.concurrent.ConcurrentHashMap; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.hdfs.server.namenode.top.TopConf; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair; -import org.apache.hadoop.metrics2.util.Metrics2Util.TopN; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,11 +68,15 @@ public static class TopWindow { public TopWindow(int windowMillis) { this.windowMillis = windowMillis; - this.top = Lists.newArrayList(); + this.top = new LinkedList<>(); } public void addOp(Op op) { - top.add(op); + if (op.getOpType().equals(TopConf.ALL_CMDS)) { + top.add(0, op); + } else { + top.add(op); + } } public int getWindowLenMs() { @@ -86,41 +92,59 @@ public List getOps() { * Represents an operation within a TopWindow. It contains a ranked * set of the top users for the operation. */ - public static class Op { + public static class Op implements Comparable { private final String opType; - private final List topUsers; + private final List users; private final long totalCount; + private final int limit; - public Op(String opType, long totalCount) { + public Op(String opType, UserCounts users, int limit) { this.opType = opType; - this.topUsers = Lists.newArrayList(); - this.totalCount = totalCount; - } - - public void addUser(User u) { - topUsers.add(u); + this.users = new ArrayList<>(users); + this.users.sort(Collections.reverseOrder()); + this.totalCount = users.getTotal(); + this.limit = limit; } public String getOpType() { return opType; } + public List getAllUsers() { + return users; + } + public List getTopUsers() { - return topUsers; + return (users.size() > limit) ? 
users.subList(0, limit) : users; } public long getTotalCount() { return totalCount; } + + @Override + public int compareTo(Op other) { + return Long.signum(totalCount - other.totalCount); + } + + @Override + public boolean equals(Object o) { + return (o instanceof Op) && totalCount == ((Op)o).totalCount; + } + + @Override + public int hashCode() { + return opType.hashCode(); + } } /** * Represents a user who called an Op within a TopWindow. Specifies the * user and the number of times the user called the operation. */ - public static class User { + public static class User implements Comparable { private final String user; - private final long count; + private long count; public User(String user, long count) { this.user = user; @@ -134,6 +158,56 @@ public String getUser() { public long getCount() { return count; } + + public void add(long delta) { + count += delta; + } + + @Override + public int compareTo(User other) { + return Long.signum(count - other.count); + } + + @Override + public boolean equals(Object o) { + return (o instanceof User) && user.equals(((User)o).user); + } + + @Override + public int hashCode() { + return user.hashCode(); + } + } + + private static class UserCounts extends ArrayList { + private long total = 0; + + UserCounts(int capacity) { + super(capacity); + } + + @Override + public boolean add(User user) { + long count = user.getCount(); + int i = indexOf(user); + if (i == -1) { + super.add(new User(user.getUser(), count)); + } else { + get(i).add(count); + } + total += count; + return true; + } + + @Override + public boolean addAll(Collection users) { + users.forEach(user -> add(user)); + return true; + } + + public long getTotal() { + return total; + } } /** @@ -142,7 +216,7 @@ public long getCount() { * operated on that metric. */ public ConcurrentHashMap metricMap = - new ConcurrentHashMap(); + new ConcurrentHashMap<>(); public RollingWindowManager(Configuration conf, int reportingPeriodMs) { @@ -184,35 +258,33 @@ public void recordMetric(long time, String command, * * @param time the current time * @return a TopWindow describing the top users for each metric in the - * window. + * window. */ public TopWindow snapshot(long time) { TopWindow window = new TopWindow(windowLenMs); Set metricNames = metricMap.keySet(); LOG.debug("iterating in reported metrics, size={} values={}", metricNames.size(), metricNames); + UserCounts totalCounts = new UserCounts(metricMap.size()); for (Map.Entry entry : metricMap.entrySet()) { String metricName = entry.getKey(); RollingWindowMap rollingWindows = entry.getValue(); - TopN topN = getTopUsersForMetric(time, metricName, rollingWindows); - final int size = topN.size(); - if (size == 0) { - continue; - } - Op op = new Op(metricName, topN.getTotal()); - window.addOp(op); - // Reverse the users from the TopUsers using a stack, - // since we'd like them sorted in descending rather than ascending order - Stack reverse = new Stack(); - for (int i = 0; i < size; i++) { - reverse.push(topN.poll()); - } - for (int i = 0; i < size; i++) { - NameValuePair userEntry = reverse.pop(); - User user = new User(userEntry.getName(), userEntry.getValue()); - op.addUser(user); + UserCounts topN = getTopUsersForMetric(time, metricName, rollingWindows); + if (!topN.isEmpty()) { + window.addOp(new Op(metricName, topN, topUsersCnt)); + totalCounts.addAll(topN); } } + // synthesize the overall total op count with the top users for every op. 
+ Set topUsers = new HashSet<>(); + for (Op op : window.getOps()) { + topUsers.addAll(op.getTopUsers()); + } + // intersect totals with the top users. + totalCounts.retainAll(topUsers); + // allowed to exceed the per-op topUsersCnt to capture total ops for + // any user + window.addOp(new Op(TopConf.ALL_CMDS, totalCounts, Integer.MAX_VALUE)); return window; } @@ -223,9 +295,9 @@ public TopWindow snapshot(long time) { * @param metricName Name of metric * @return */ - private TopN getTopUsersForMetric(long time, String metricName, + private UserCounts getTopUsersForMetric(long time, String metricName, RollingWindowMap rollingWindows) { - TopN topN = new TopN(topUsersCnt); + UserCounts topN = new UserCounts(topUsersCnt); Iterator> iterator = rollingWindows.entrySet().iterator(); while (iterator.hasNext()) { @@ -242,7 +314,7 @@ private TopN getTopUsersForMetric(long time, String metricName, } LOG.debug("offer window of metric: {} userName: {} sum: {}", metricName, userName, windowSum); - topN.offer(new NameValuePair(userName, windowSum)); + topN.add(new User(userName, windowSum)); } LOG.debug("topN users size for command {} is: {}", metricName, topN.size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 2423a037c8fd0..c75fbe04f23d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -113,9 +113,9 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.StringUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.ResourceFilters; /** Web-hdfs NameNode implementation. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECReconstructionCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECReconstructionCommand.java index 6e9c55be2c24b..b2495c8d6d9d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECReconstructionCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockECReconstructionCommand.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java index 926cce91be9a3..b2a11f0a171fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * BlockRecoveryCommand is an instruction to a data-node to recover diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java index b6eddb61579da..f60d748dc9f56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java index 711973838bc06..d7c2466e2eeee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * DatanodeRegistration class contains all information the name-node needs diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index 10650da75d14a..81a7e457891cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.util.VersionInfo; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * NamespaceInfo is returned by the name-node in reply diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java index 1d26bc4983e54..8c27f30978324 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import com.google.common.base.Function; -import com.google.common.collect.ComparisonChain; +import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; +import java.util.function.Function; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; public class RemoteEditLog implements Comparable { @@ -82,16 +82,13 @@ public int hashCode() { } /** - * Guava Function which applies {@link #getStartTxId()} + * Java Function which applies {@link #getStartTxId()} */ public static final Function GET_START_TXID = - new Function() { - @Override - public Long apply(RemoteEditLog log) { + log -> { if (null == log) { return HdfsServerConstants.INVALID_TXID; } return log.getStartTxId(); - } - }; + }; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java index 8252b3bc7f702..391078f558509 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java @@ -20,8 +20,8 @@ import java.util.Collections; import java.util.List; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java index 9cb646b38f6f7..40d0e69591c9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java @@ -1,4 +1,5 @@ /** + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,10 +18,12 @@ */ package org.apache.hadoop.hdfs.tools; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; @@ -43,19 +46,29 @@ public class AdminHelper { static DistributedFileSystem getDFS(Configuration conf) throws IOException { FileSystem fs = FileSystem.get(conf); - if (!(fs instanceof DistributedFileSystem)) { - throw new IllegalArgumentException("FileSystem " + fs.getUri() + - " is not an HDFS file system"); - } - return (DistributedFileSystem)fs; + return checkAndGetDFS(fs, conf); } static DistributedFileSystem getDFS(URI uri, Configuration conf) throws IOException { FileSystem fs = FileSystem.get(uri, conf); + return checkAndGetDFS(fs, conf); + } + + static DistributedFileSystem checkAndGetDFS(FileSystem fs, Configuration conf) + throws IOException { + if ((fs instanceof ViewFileSystemOverloadScheme)) { + // With ViewFSOverloadScheme, the admin will pass -fs option with intended + // child fs mount path. GenericOptionsParser would have set the given -fs + // as FileSystem's defaultURI. So, we are using FileSystem.getDefaultUri + // to use the given -fs path. + fs = ((ViewFileSystemOverloadScheme) fs) + .getRawFileSystem(new Path(FileSystem.getDefaultUri(conf)), conf); + } if (!(fs instanceof DistributedFileSystem)) { throw new IllegalArgumentException("FileSystem " + fs.getUri() - + " is not an HDFS file system"); + + " is not an HDFS file system. 
The fs class is: " + + fs.getClass().getName()); } return (DistributedFileSystem) fs; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index 9e7a3cb9c753b..a22d34bf007e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -44,7 +44,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.ToolRunner; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 04960e3c3e2ce..5344d14165910 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -38,7 +38,7 @@ import java.util.TreeSet; import java.util.concurrent.TimeUnit; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -87,7 +87,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.io.MultipleIOException; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; import org.apache.hadoop.ipc.RefreshResponse; @@ -103,7 +103,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * This class provides some DFS administrative access shell commands. 
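The AdminHelper change above introduces checkAndGetDFS, which first unwraps a ViewFileSystemOverloadScheme to the child file system selected by fs.defaultFS (set from the -fs option) and only then insists on a DistributedFileSystem. A condensed sketch of that control flow, assuming only the public APIs the patch itself calls:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class DfsResolveSketch {
  // Mirrors the shape of AdminHelper.checkAndGetDFS: unwrap the overload
  // scheme first, then require an HDFS file system.
  static DistributedFileSystem resolveDfs(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof ViewFileSystemOverloadScheme) {
      // -fs placed the intended child mount path into fs.defaultFS, so
      // resolve that path against the mount table to get the raw child fs.
      fs = ((ViewFileSystemOverloadScheme) fs)
          .getRawFileSystem(new Path(FileSystem.getDefaultUri(conf)), conf);
    }
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException(
          fs.getUri() + " is not an HDFS file system");
    }
    return (DistributedFileSystem) fs;
  }
}
```

With the helper in place, DFSAdmin's getDFS() and setBalancerBandwidth() in the hunks that follow delegate to it instead of repeating the instanceof check.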
@@ -130,11 +130,7 @@ public DFSAdminCommand(Configuration conf) { @Override public void run(PathData pathData) throws IOException { FileSystem fs = pathData.fs; - if (!(fs instanceof DistributedFileSystem)) { - throw new IllegalArgumentException("FileSystem " + fs.getUri() - + " is not an HDFS file system"); - } - this.dfs = (DistributedFileSystem) fs; + this.dfs = AdminHelper.checkAndGetDFS(fs, getConf()); run(pathData.path); } } @@ -483,14 +479,9 @@ public DFSAdmin() { public DFSAdmin(Configuration conf) { super(conf); } - + protected DistributedFileSystem getDFS() throws IOException { - FileSystem fs = getFS(); - if (!(fs instanceof DistributedFileSystem)) { - throw new IllegalArgumentException("FileSystem " + fs.getUri() + - " is not an HDFS file system"); - } - return (DistributedFileSystem)fs; + return AdminHelper.checkAndGetDFS(getFS(), getConf()); } /** @@ -1010,14 +1001,14 @@ public int listOpenFiles(String[] argv) throws IOException { private void printOpenFiles(RemoteIterator openFilesIterator) throws IOException { - System.out.println(String.format("%-20s\t%-20s\t%s", "Client Host", - "Client Name", "Open File Path")); + System.out.printf("%-20s\t%-20s\t%s%n", "Client Host", + "Client Name", "Open File Path"); while (openFilesIterator.hasNext()) { OpenFileEntry openFileEntry = openFilesIterator.next(); - System.out.println(String.format("%-20s\t%-20s\t%20s", + System.out.printf("%-20s\t%-20s\t%20s%n", openFileEntry.getClientMachine(), openFileEntry.getClientName(), - openFileEntry.getFilePath())); + openFileEntry.getFilePath()); } } @@ -1045,14 +1036,7 @@ public int setBalancerBandwidth(String[] argv, int idx) throws IOException { System.err.println("Bandwidth should be a non-negative integer"); return exitCode; } - - FileSystem fs = getFS(); - if (!(fs instanceof DistributedFileSystem)) { - System.err.println("FileSystem is " + fs.getUri()); - return exitCode; - } - - DistributedFileSystem dfs = (DistributedFileSystem) fs; + DistributedFileSystem dfs = getDFS(); try{ dfs.setBalancerBandwidth(bandwidth); System.out.println("Balancer bandwidth is set to " + bandwidth); @@ -2045,7 +2029,7 @@ public int genericRefresh(String[] argv, int i) throws IOException { InetSocketAddress address = NetUtils.createSocketAddr(hostport); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB) RPC.getProxy(xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java index fcfb47c8c65ab..15c63732f7a69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java @@ -23,9 +23,9 @@ import java.util.Collection; import java.util.Map; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSortedMap; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSortedMap; import 
org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; import org.apache.hadoop.ha.FailoverController; @@ -284,6 +284,11 @@ private int failover(CommandLine cmd) HAServiceTarget fromNode = resolveTarget(args[0]); HAServiceTarget toNode = resolveTarget(args[1]); + fromNode.setTransitionTargetHAStatus( + HAServiceProtocol.HAServiceState.STANDBY); + toNode.setTransitionTargetHAStatus( + HAServiceProtocol.HAServiceState.ACTIVE); + // Check that auto-failover is consistently configured for both nodes. Preconditions.checkState( fromNode.isAutoFailoverEnabled() == diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java index 281d1be31eb3b..4773beb85de08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java @@ -29,7 +29,7 @@ import java.util.ArrayList; import java.util.List; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -111,21 +111,39 @@ protected byte[] targetToData(HAServiceTarget target) { @Override protected InetSocketAddress getRpcAddressToBindTo() { int zkfcPort = getZkfcPort(conf); - return new InetSocketAddress(localTarget.getAddress().getAddress(), - zkfcPort); + String zkfcBindAddr = getZkfcServerBindHost(conf); + if (zkfcBindAddr == null || zkfcBindAddr.isEmpty()) { + zkfcBindAddr = localTarget.getAddress().getAddress().getHostAddress(); + } + return new InetSocketAddress(zkfcBindAddr, zkfcPort); } - @Override protected PolicyProvider getPolicyProvider() { return new HDFSPolicyProvider(); } - + static int getZkfcPort(Configuration conf) { return conf.getInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY, DFSConfigKeys.DFS_HA_ZKFC_PORT_DEFAULT); } - + + /** + * Given a configuration get the bind host that could be used by ZKFC. + * We derive it from NN service rpc bind host or NN rpc bind host. 
+ * + * @param conf input configuration + * @return the bind host address found in conf + */ + private static String getZkfcServerBindHost(Configuration conf) { + String addr = conf.getTrimmed( + DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY); + if (addr == null || addr.isEmpty()) { + addr = conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY); + } + return addr; + } + public static DFSZKFailoverController create(Configuration conf) { Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf); String nsId = DFSUtil.getNamenodeNameServiceId(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java index 9a654e580cb06..89389a094686a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java @@ -33,7 +33,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index 0dc4bcba4d24f..10156287be15d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.GenericOptionsParser; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Fetch a DelegationToken from the current Namenode and store it in the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java index c4527e59b2884..94aff53470b72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetUtils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java index a6901f335d1fb..7238c58cb579b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java @@ -41,7 
+41,7 @@ import org.xml.sax.helpers.DefaultHandler; import org.xml.sax.helpers.XMLReaderFactory; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * OfflineEditsXmlLoader walks an EditsVisitor over an OEV XML file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java index cc97ea71d28e5..411df1062771b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * StatisticsEditsVisitor implements text version of EditsVisitor diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java index 9b0031a3a2eaa..f14ee5f930fe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFutureListener; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java index 7032e2a776d34..0d4781d12f7ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java @@ -30,7 +30,7 @@ import java.util.List; import java.util.Map; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; @@ -57,9 +57,9 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.LimitInputStream; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * FSImageLoader loads fsimage and provide methods to return JSON formatted diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java index 25a7bbe40bd0a..54b183b7b6965 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java @@ -33,7 +33,7 @@ import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * This is the tool for analyzing file sizes in the namespace image. In order to diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java index 7eccf75dc65e5..9ad4b090649b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_NAME_MASK; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_NAME_OFFSET; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_SCOPE_OFFSET; @@ -49,8 +49,8 @@ import java.util.LinkedList; import java.util.Map; -import com.google.common.io.CountingOutputStream; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.io.CountingOutputStream; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.TextFormat; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java index 7e5eb3177f95b..737e7384b9a7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.namenode.FsImageProto; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java index 1bd7d97f8a155..cd4047d9e646c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.commons.text.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java index 2c4a8946e8bd2..920dfdc6dc933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java @@ -68,8 +68,8 @@ import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.util.LimitInputStream; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.util.VersionInfo; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_MASK; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java index 7f837fd32d6d9..f732bd6a7f542 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java @@ -22,7 +22,7 @@ import java.nio.file.Files; import java.nio.file.Paths; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * TextWriterImageProcessor mixes in the ability for ImageVisitor diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java index 767ecd809e048..29ac759576c28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelInitializer; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java index 2fdf5472bee03..375630db6bdc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java @@ -28,8 +28,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; -import com.google.common.io.Files; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; /** * Class that represents a file on disk which stores a single long diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java index 188537b756f5d..21a7bb58750b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hdfs.util; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; -import com.google.common.base.Preconditions; - /** * The difference between the current state and a previous state of a list. * @@ -166,6 +166,17 @@ public E setCreated(int index, E element) { return old; } + public boolean removeCreated(final E element) { + if (created != null) { + final int i = search(created, element.getKey()); + if (i >= 0 && created.get(i) == element) { + created.remove(i); + return true; + } + } + return false; + } + public void clearCreated() { if (created != null) { created.clear(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index dded84800ec33..880bf6edb2516 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.util; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.ArrayUtils; import java.util.Arrays; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java index 126070aa016a1..fee687edf54b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java @@ -19,7 +19,7 @@ import java.util.Arrays; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Similar to {@link EnumCounters} except that the value type is double. 
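Diff.removeCreated above removes an element from the key-sorted created list only when the slot located by the key search holds that exact instance, so an equal-but-distinct object is not dropped by mistake. A generic sketch of the same identity-guarded removal using plain JDK collections, not the HDFS Diff class:

```java
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.function.Function;

final class IdentityRemoveSketch {
  // Search a key-sorted list for 'element' and remove it only when the slot
  // holds this exact instance (==), leaving an equal-but-distinct object alone.
  static <E, K extends Comparable<K>> boolean removeExact(
      List<E> sortedByKey, E element, Function<E, K> keyOf) {
    int i = Collections.binarySearch(sortedByKey, element, Comparator.comparing(keyOf));
    if (i >= 0 && sortedByKey.get(i) == element) {
      sortedByKey.remove(i);
      return true;
    }
    return false;
  }
}
```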
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java index b541b14f64924..2bc63ec77eb29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java @@ -35,7 +35,7 @@ import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Static functions for dealing with files of the same format diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java index a94d7edff08b3..78834e0161a4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java @@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Class that represents a file on disk which persistently stores diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReferenceCountMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReferenceCountMap.java index 5b29c4308b8bb..6c40b29c0d812 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReferenceCountMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReferenceCountMap.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hdfs.util; -import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Class for de-duplication of instances.
    @@ -37,7 +37,7 @@ @InterfaceStability.Evolving public class ReferenceCountMap { - private Map referenceMap = new HashMap(); + private Map referenceMap = new ConcurrentHashMap<>(); /** * Add the reference. If the instance already present, just increase the @@ -47,10 +47,9 @@ public class ReferenceCountMap { * @return Referenced instance */ public E put(E key) { - E value = referenceMap.get(key); + E value = referenceMap.putIfAbsent(key, key); if (value == null) { value = key; - referenceMap.put(key, value); } value.incrementAndGetRefCount(); return value; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index bf5568bc3e563..ceddfca2ced09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.web; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; @@ -40,7 +40,7 @@ import org.apache.hadoop.util.StringUtils; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.IOException; import java.util.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java index 2e0a17efa4b30..02b70c16be649 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java @@ -35,7 +35,7 @@ import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.ParamException; import com.sun.jersey.api.container.ContainerException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 913e47b75ff67..ad62dd4e7f346 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -327,9 +327,23 @@ Decide if sort block locations considers the target's load or not when read. Turn off by default. + It is not possible to enable this feature along with dfs.namenode.read.considerStorageType as only one sort can be + enabled at a time. + + dfs.namenode.read.considerStorageType + false + + Decide if sort block locations considers the target's storage type or not when read. Any locations with the same + network distance are sorted in order of the storage speed, fastest first (RAM, SSD, Disk, Archive). This is + disabled by default, and the locations will be ordered randomly. + It is not possible to enable this feature along with dfs.namenode.read.considerLoad as only one sort can be + enabled at a time. 
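Stepping back to the ReferenceCountMap hunk above: it swaps the HashMap for a ConcurrentHashMap and replaces the get()/put() pair with putIfAbsent(), making insertion of the canonical instance atomic. A small sketch of the same de-duplication idiom, with a hypothetical Counted interface standing in for the map's real element type:

```java
import java.util.concurrent.ConcurrentHashMap;

final class DedupMapSketch<E extends DedupMapSketch.Counted> {
  // Hypothetical stand-in for the element type; the real map's elements
  // carry their own reference count in a similar way.
  interface Counted { long incrementAndGetRefCount(); }

  private final ConcurrentHashMap<E, E> map = new ConcurrentHashMap<>();

  // putIfAbsent makes "insert the canonical instance if missing" a single
  // atomic step, which the previous get()/put() pair on a HashMap was not.
  E put(E key) {
    E value = map.putIfAbsent(key, key);
    if (value == null) {
      value = key; // this call won the race; key becomes the canonical instance
    }
    value.incrementAndGetRefCount();
    return value;
  }
}
```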
+ + + dfs.datanode.httpserver.filter.handlers org.apache.hadoop.hdfs.server.datanode.web.RestCsrfPreventionFilterHandler @@ -1572,6 +1586,25 @@ + + dfs.block.scanner.skip.recent.accessed + false + + If this is true, scanner will check the access time of block file to avoid + scanning blocks accessed during recent scan peroid, reducing disk IO. + This feature will not work if the DataNode volume has noatime mount option. + + + + + dfs.block.scanner.volume.join.timeout.ms + 5000 + + The amount of time in milliseconds that the BlockScanner times out waiting + for the VolumeScanner thread to join during a shutdown call. + + + dfs.datanode.readahead.bytes 4194304 @@ -2978,6 +3011,15 @@ + + dfs.client.read.use.cache.priority + false + + If true, the cached replica of the datanode is preferred + else the replica closest to client is preferred. + + + dfs.block.local-path-access.user @@ -3078,26 +3120,18 @@ - dfs.client.deadnode.detection.deadnode.queue.max - 100 - - The max queue size of probing dead node. - - - - - dfs.client.deadnode.detection.suspectnode.queue.max - 1000 + dfs.client.deadnode.detection.probe.deadnode.threads + 10 - The max queue size of probing suspect node. + The maximum number of threads to use for probing dead node. - dfs.client.deadnode.detection.probe.deadnode.threads - 10 + dfs.client.deadnode.detection.idle.sleep.ms + 10000 - The maximum number of threads to use for probing dead node. + The sleep time of DeadNodeDetector per iteration. @@ -3242,7 +3276,7 @@ dfs.datanode.block.id.layout.upgrade.threads - 12 + 6 The number of threads to use when creating hard links from current to previous blocks during upgrade of a DataNode to block ID-based block layout (see HDFS-6482 for details on the layout). @@ -4155,6 +4189,16 @@ + + dfs.client.short.circuit.num + 1 + + Number of short-circuit caches. This setting should + be in the range 1 - 5. Lower values will result in lower CPU consumption; higher + values may speed up massive parallel reading files. + + + dfs.client.read.striped.threadpool.size 18 @@ -4579,6 +4623,15 @@ + + dfs.ha.allow.stale.reads + false + + If true, a NameNode in Standby state can process read request and the result + could be stale. + + + dfs.journalnode.edits.dir /tmp/hadoop/dfs/journalnode/ @@ -4747,6 +4800,27 @@ + + dfs.namenode.audit.log.async.blocking + true + + Only used when enables asynchronous audit log. Sets whether audit log async + appender should wait if there is no space available in the event buffer or + immediately return. Default value is true. + + + + + dfs.namenode.audit.log.async.buffer.size + 128 + + Only used when enables asynchronous audit log. Sets the number of audit + logs allowed in the event buffer before the calling thread is blocked + (if dfs.namenode.audit.log.async.blocking is true) or until logs are + summarized and discarded. Default value is 128. + + + dfs.namenode.audit.log.token.tracking.id false @@ -4901,7 +4975,7 @@ dfs.namenode.replication.max-streams 2 - Hard limit for the number of highest-priority replication streams. + Hard limit for the number of replication streams other than those with highest-priority. @@ -5861,4 +5935,12 @@ directories when permissions is enabled. Default value is false; + + + dfs.protected.subdirectories.enable + false + whether to protect the subdirectories of directories which + set on fs.protected.directories. 
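The hdfs-default.xml additions above only document keys and defaults; the values are still read through the ordinary Configuration accessors. A hedged illustration using three of the newly documented keys (the real HDFS code goes through DFSConfigKeys constants and its own accessors rather than the string literals shown here):

```java
import org.apache.hadoop.conf.Configuration;

final class NewKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Defaults below simply restate the values documented in hdfs-default.xml.
    boolean protectSubdirs =
        conf.getBoolean("dfs.protected.subdirectories.enable", false);
    int shortCircuitCaches =
        conf.getInt("dfs.client.short.circuit.num", 1);
    long volumeJoinTimeoutMs =
        conf.getLong("dfs.block.scanner.volume.join.timeout.ms", 5000L);
    System.out.println(protectSubdirs + " " + shortCircuitCaches
        + " " + volumeJoinTimeoutMs);
  }
}
```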
+ + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html index 8341b7b84c16f..39680e84a8c82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html @@ -99,52 +99,6 @@ {/dn.BPServiceActorInfo} -{#ozone.enabled} - - - - - - - - - - - - {#ozone.SCMServers} - - - - - - - - {/ozone.SCMServers} -
    SCM AddressStatusVersionMissed countLast heartbeat
    {addressString}{state}{versionNumber}{missedCount}s{lastSuccessfulHeartbeat|elapsed|fmt_time}
    - - - - - - - - - - - - - {#ozone.LocationReport} - - - - - - - - {/ozone.LocationReport} -
    IDCapacityRemainingSCM usedfailed
    {id}{capacity|fmt_bytes}{remaining|fmt_bytes}{scmUsed|fmt_bytes}{failed}
    -{/ozone.enabled} - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index f1a5f4a48c878..8ad7a65708f4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -108,7 +108,8 @@ */ function view_perm_details(e, filename, abs_path, perms) { $('.explorer-perm-links').popover('destroy'); - e.popover({html: true, content: $('#explorer-popover-perm-info').html(), trigger: 'focus'}) + setTimeout(function() { + e.popover({html: true,sanitize: false, content: $('#explorer-popover-perm-info').html(), trigger: 'focus'}) .on('shown.bs.popover', function(e) { var popover = $(this), parent = popover.parent(); //Convert octal to binary permissions @@ -122,6 +123,7 @@ }); }) .popover('show'); + }, 100); } // Use WebHDFS to set permissions on an absolute path diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md index bc5ac30769463..d199c06afb740 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md @@ -693,4 +693,42 @@ Usage: `hdfs debug recoverLease -path [-retries ]` | [`-path` *path*] | HDFS path for which to recover the lease. | | [`-retries` *num-retries*] | Number of times the client will retry calling recoverLease. The default number of retries is 1. | -Recover the lease on the specified path. The path must reside on an HDFS filesystem. The default number of retries is 1. +Recover the lease on the specified path. The path must reside on an HDFS file system. The default number of retries is 1. + +dfsadmin with ViewFsOverloadScheme +---------------------------------- + +Usage: `hdfs dfsadmin -fs ` + +| COMMAND\_OPTION | Description | +|:---- |:---- | +| `-fs` *child fs mount link URI* | Its a logical mount link path to child file system in ViewFS world. This uri typically formed as src mount link prefixed with fs.defaultFS. Please note, this is not an actual child file system uri, instead its a logical mount link uri pointing to actual child file system| + +Example command usage: + `hdfs dfsadmin -fs hdfs://nn1 -safemode enter` + +In ViewFsOverloadScheme, we may have multiple child file systems as mount point mappings as shown in [ViewFsOverloadScheme Guide](./ViewFsOverloadScheme.html). Here -fs option is an optional generic parameter supported by dfsadmin. When users want to execute commands on one of the child file system, they need to pass that file system mount mapping link uri to -fs option. Let's take an example mount link configuration and dfsadmin command below. + +Mount link: + +```xml + + fs.defaultFS + hdfs://MyCluster1 + + + + fs.viewfs.mounttable.MyCluster1./user + hdfs://MyCluster2/user + hdfs://MyCluster2/user + mount link path: /user + mount link uri: hdfs://MyCluster1/user + mount target uri for /user: hdfs://MyCluster2/user --> + +``` + +If user wants to talk to `hdfs://MyCluster2/`, then they can pass -fs option (`-fs hdfs://MyCluster1/user`) +Since /user was mapped to a cluster `hdfs://MyCluster2/user`, dfsadmin resolve the passed (`-fs hdfs://MyCluster1/user`) to target fs (`hdfs://MyCluster2/user`). +This way users can get the access to all hdfs child file systems in ViewFsOverloadScheme. 
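+For illustration, a hedged example using the mount link above (assuming both MyCluster1 and MyCluster2 are running; `-safemode get` is just one of the standard dfsadmin subcommands):
+
+```bash
+# The -fs value is the mount link uri (fs.defaultFS + mount path), not the
+# physical cluster uri; dfsadmin resolves it to the target child file system.
+hdfs dfsadmin -fs hdfs://MyCluster1/user -safemode get
+# Because /user is mounted to hdfs://MyCluster2/user, the command above is
+# executed against MyCluster2.
+```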
+If no `-fs` option is provided, then dfsadmin will try to connect to the cluster configured in fs.defaultFS, if a cluster is running at that uri.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md
new file mode 100644
index 0000000000000..4550f0441b62e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsBlockPlacementPolicies.md
@@ -0,0 +1,165 @@
+
+
+BlockPlacementPolicies
+======================
+
+
+## Introduction
+By default HDFS uses BlockPlacementPolicyDefault, which places one replica on the local node and two more replicas on two different nodes of the same remote rack. In addition to this, HDFS supports several pluggable block placement policies. Users can choose a policy based on their infrastructure and use case. This document describes each type of policy along with its use cases and configuration.
+
+
+## BlockPlacementPolicyRackFaultTolerant
+
+BlockPlacementPolicyRackFaultTolerant can be used to split the placement of replicas across multiple racks. By default, with a replication factor of 3, BlockPlacementPolicyDefault puts one replica on the local machine if the writer is on a datanode, otherwise on a random datanode in the same rack as that of the writer, another replica on a node in a different (remote) rack, and the last on a different node in the same remote rack. So only 2 racks are used in total, and a scenario where those 2 racks go down at the same time causes data unavailability, whereas BlockPlacementPolicyRackFaultTolerant helps by placing the 3 replicas on 3 different racks.
+
+For more details check [HDFS-7891](https://issues.apache.org/jira/browse/HDFS-7891)
+
+![Rack Fault Tolerant Policy](images/RackFaultTolerant.jpg)
+
+ **Configurations :**
+
+- hdfs-site.xml
+
+```xml
+<property>
+  <name>dfs.block.replicator.classname</name>
+  <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant</value>
+</property>
+```
+
+
+## BlockPlacementPolicyWithNodeGroup
+
+With the new 3-layer hierarchical topology, a node group level is introduced, which maps well onto an infrastructure based on a virtualized environment. In a virtualized environment, multiple VMs are hosted on the same physical machine, and VMs on the same physical host are affected by the same hardware failures. By mapping the physical host to a node group, this block placement policy guarantees that it will never place more than one replica on the same node group (physical host); in case of a node group failure, only one replica will be lost at the maximum.
+
+ **Configurations :**
+
+- core-site.xml
+
+```xml
+<property>
+  <name>net.topology.impl</name>
+  <value>org.apache.hadoop.net.NetworkTopologyWithNodeGroup</value>
+</property>
+<property>
+  <name>net.topology.nodegroup.aware</name>
+  <value>true</value>
+</property>
+```
+
+- hdfs-site.xml
+
+```xml
+<property>
+  <name>dfs.block.replicator.classname</name>
+  <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup</value>
+</property>
+```
+
+- Topology script
+
+The topology script is the same as in the examples above; the only difference is that,
+instead of returning only **/{rack}**, the script should return
+**/{rack}/{nodegroup}**.
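+For illustration only, a minimal script sketch follows; the mapping file location `/etc/hadoop/topology.data` is hypothetical, and any executable that maps each host argument to a `/{rack}/{nodegroup}` string works.
+
+```bash
+#!/bin/bash
+# Hypothetical sketch: look up every host/IP argument in a mapping file and
+# print its /{rack}/{nodegroup} entry, one result per argument.
+MAP_FILE=/etc/hadoop/topology.data          # assumed location of the mapping table below
+DEFAULT="/default-rack/default-nodegroup"   # fallback for unknown hosts
+for host in "$@"; do
+  entry=$(awk -v h="$host" '$1 == h {print $2}' "$MAP_FILE")
+  echo "${entry:-$DEFAULT}"
+done
+```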
Following is an example topology mapping table: + +``` +192.168.0.1 /rack1/nodegroup1 +192.168.0.2 /rack1/nodegroup1 +192.168.0.3 /rack1/nodegroup2 +192.168.0.4 /rack1/nodegroup2 +192.168.0.5 /rack2/nodegroup3 +192.168.0.6 /rack2/nodegroup3 +``` + +For more details check [HDFS-8468](https://issues.apache.org/jira/browse/HADOOP-8468) + +## BlockPlacementPolicyWithUpgradeDomain + +To address the limitation of block placement policy on rolling upgrade, the concept of upgrade domain has been added to HDFS via a new block placement policy. The idea is to group datanodes in a new dimension called upgrade domain, in addition to the existing rack-based grouping. For example, we can assign all datanodes in the first position of any rack to upgrade domain ud_01, nodes in the second position to upgrade domain ud_02 and so on. +It will make sure replicas of any given block are distributed across machines from different upgrade domains. By default, 3 replicas of any given block are placed on 3 different upgrade domains. This means all datanodes belonging to a specific upgrade domain collectively won’t store more than one replica of any block. + +For more details check [HDFS-9006](https://issues.apache.org/jira/browse/HDFS-9006) + +Detailed info about configuration [Upgrade Domain Policy](HdfsUpgradeDomain.html) + +## AvailableSpaceBlockPlacementPolicy + +The AvailableSpaceBlockPlacementPolicy is a space balanced block placement policy. It is similar to BlockPlacementPolicyDefault but will choose low used percent datanodes for new blocks with a little high possibility. + + **Configurations :** + +- hdfs-site.xml + +```xml + + dfs.block.replicator.classname + org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy + + + + dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction + 0.6 + + Special value between 0 and 1, noninclusive. Increases chance of + placing blocks on Datanodes with less disk space used. + + + + + + dfs.namenode.available-space-block-placement-policy.balance-local-node + + false + + If true, balances the local node too. + + +``` + +For more details check [HDFS-8131](https://issues.apache.org/jira/browse/HDFS-8131) + +## AvailableSpaceRackFaultTolerantBlockPlacementPolicy + +The AvailableSpaceRackFaultTolerantBlockPlacementPolicy is a space balanced block placement policy similar to AvailableSpaceBlockPlacementPolicy. It extends BlockPlacementPolicyRackFaultTolerant and distributes the blocks +amongst maximum number of racks possible and at the same time will try to choose datanodes with low used percent with high probability. + + **Configurations :** + +- hdfs-site.xml + +```xml + + dfs.block.replicator.classname + org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceRackFaultTolerantBlockPlacementPolicy + + + + dfs.namenode.available-space-rack-fault-tolerant-block-placement-policy.balanced-space-preference-fraction + 0.6 + + Only used when the dfs.block.replicator.classname is set to + org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceRackFaultTolerantBlockPlacementPolicy. + Special value between 0 and 1, noninclusive. Increases chance of + placing blocks on Datanodes with less disk space used. More the value near 1 + more are the chances of choosing the datanode with less percentage of data. + Similarly as the value moves near 0, the chances of choosing datanode with + high load increases as the value reaches near 0. 
+ + +``` + +For more details check [HDFS-15288](https://issues.apache.org/jira/browse/HDFS-15288) \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md index ff43e45191880..894ff7e78e863 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md @@ -139,6 +139,9 @@ then that replica is preferred to satisfy the read request. If HDFS cluster spans multiple data centers, then a replica that is resident in the local data center is preferred over any remote replica. +### Block Placement Policies +As mentioned above when the replication factor is three, HDFS’s placement policy is to put one replica on the local machine if the writer is on a datanode, otherwise on a random datanode in the same rack as that of the writer, another replica on a node in a different (remote) rack, and the last on a different node in the same remote rack. If the replication factor is greater than 3, the placement of the 4th and following replicas are determined randomly while keeping the number of replicas per rack below the upper limit (which is basically (replicas - 1) / racks + 2). Additional to this HDFS supports 4 different pluggable [Block Placement Policies](HdfsBlockPlacementPolicies.html). Users can choose the policy based on their infrastructre and use case. By default HDFS supports BlockPlacementPolicyDefault. + ### Safemode On startup, the NameNode enters a special state called Safemode. Replication of data blocks does not occur when the NameNode is in the Safemode state. The NameNode receives Heartbeat and Blockreport messages from the DataNodes. A Blockreport contains the list of data blocks that a DataNode is hosting. Each block has a specified minimum number of replicas. A block is considered safely replicated when the minimum number of replicas of that data block has checked in with the NameNode. After a configurable percentage of safely replicated data blocks checks in with the NameNode (plus an additional 30 seconds), the NameNode exits the Safemode state. It then determines the list of data blocks (if any) that still have fewer than the specified number of replicas. The NameNode then replicates these blocks to other DataNodes. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md index f851ef6a656a0..b29a888475941 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md @@ -361,6 +361,75 @@ resume its work, it's a good idea to provision some sort of cron job to purge su Delegation tokens for the cluster to which you are submitting the job (including all mounted volumes for that cluster’s mount table), and for input and output paths to your map-reduce job (including all volumes mounted via mount tables for the specified input and output paths) are all handled automatically. In addition, there is a way to add additional delegation tokens to the base cluster configuration for special circumstances. +Don't want to change scheme or difficult to copy mount-table configurations to all clients? 
+------------------------------------------------------------------------------------------- + +Please refer to the [View File System Overload Scheme Guide](./ViewFsOverloadScheme.html) + +Regex Pattern Based Mount Points +-------------------------------- + +The view file system mount points were a Key-Value based mapping system. It is not friendly for user cases which mapping config could be abstracted to rules. E.g. Users want to provide a GCS bucket per user and there might be thousands of users in total. The old key-value based approach won't work well for several reasons: + +1. The mount table is used by FileSystem clients. There's a cost to spread the config to all clients and we should avoid it if possible. The [View File System Overload Scheme Guide](./ViewFsOverloadScheme.html) could help the distribution by central mount table management. But the mount table still have to be updated on every change. The change could be greatly avoided if provide a rule-based mount table. + +2. The client have to understand all the KVs in the mount table. This is not ideal when the mountable grows to thousands of items. E.g. thousands of file systems might be initialized even users only need one. And the config itself will become bloated at scale. + +### Understand the Difference + +In the key-value based mount table, view file system treats every mount point as a partition. There's several file system APIs which will lead to operation on all partitions. E.g. there's an HDFS cluster with multiple mount. Users want to run “hadoop fs -put file viewfs://hdfs.namenode.apache.org/tmp/” cmd to copy data from local disk to our HDFS cluster. The cmd will trigger ViewFileSystem to call setVerifyChecksum() method which will initialize the file system for every mount point. +For a regex rule based mount table entry, we couldn't know what's corresponding path until parsing. So the regex based mount table entry will be ignored on such cases. The file system (ChRootedFileSystem) will be created upon accessing. But the underlying file system will be cached by inner cache of ViewFileSystem. +```xml + + fs.viewfs.rename.strategy + SAME_FILESYSTEM_ACROSS_MOUNTPOINT + +``` + +### Basic Regex Link Mapping Config +Here's an example of base regex mount point config. ${username} is the named capture group in Java Regex. +```xml + + fs.viewfs.mounttable.hadoop-nn.linkRegx./^(?\\w+) + gs://${username}.hadoop.apache.org/ + +``` +Parsing example. +```bash +viewfs://hadoop-nn/user1/dir1 => gs://user1.hadoop.apache.org/dir1 +viewfs://hadoop-nn/user2 => gs://user2.hadoop.apache.org/ +``` +The src/key’s format are +```bash +fs.viewfs.mounttable.${VIEWNAME}.linkRegx.${REGEX_STR} +``` + +### Regex Link Mapping With Interceptors +Interceptor is one mechanism introduced to modify source or target in the resolution process. It’s optional and could be used to satisfy user cases such as replace specific character or replace some word. Interceptor will only work for regex mount point. RegexMountPointResolvedDstPathReplaceInterceptor is the only build-in interceptor now. + +Here’s an example regex mount point entry with RegexMountPointResolvedDstPathReplaceInterceptor set. + +```xml + + fs.viewfs.mounttable.hadoop-nn.linkRegx.replaceresolveddstpath:_:-#./^(?\\w+) + gs://${username}.hadoop.apache.org/ + +``` +The ```replaceresolveddstpath:_:-``` is an interceptor setting. “replaceresolveddstpath” is the interceptor type, “_” is the string to replace and “-” is the string after replace. + +Parsing example. 
+```bash +viewfs://hadoop-nn/user_ad/dir1 => gs://user-ad.hadoop.apache.org/dir1 +viewfs://hadoop-nn/user_ad_click => gs://user-ad-click.hadoop.apache.org/ +``` +The src/key’s format are +```bash +fs.viewfs.mounttable.${VIEWNAME}.linkRegx.${REGEX_STR} +fs.viewfs.mounttable.${VIEWNAME}.linkRegx.${interceptorSettings}#.${srcRegex} +``` + + + Appendix: A Mount Table Configuration Example --------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md new file mode 100644 index 0000000000000..f3eb336da6121 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFsOverloadScheme.md @@ -0,0 +1,205 @@ + + +View File System Overload Scheme Guide +====================================== + + + +Introduction +------------ + +The View File System Overload Scheme introduced to solve two key challenges with the View File System(ViewFS). The first problem is, to use ViewFS, users need to update fs.defaultFS with viewfs scheme (`viewfs://`). The second problem is that users need to copy the mount-table configurations to all the client nodes. +The ViewFileSystemOverloadScheme is addressing these challenges. + +View File System Overload Scheme +-------------------------------- + +### Details + +The View File System Overload Scheme is an extension to the View File System. This will allow users to continue to use their existing fs.defaultFS configured scheme or any new scheme name instead of using scheme `viewfs`. +Mount link configurations key, value formats are same as in [ViewFS Guide](./ViewFs.html). +If a user wants to continue use the same fs.defaultFS and wants to have more mount points, then mount link configurations should have the ViewFileSystemOverloadScheme initialized uri's hostname as the mount table name. +Example if fs.defaultFS is `hdfs://mycluster`, then the mount link configuration key name should be like in the following format `fs.viewfs.mounttable.*mycluster*.link.`. +Even if the initialized fs uri has hostname:port, it will simply ignore the port number and only consider the hostname as the mount table name. We will discuss more example configurations in following sections. +If there are no mount links configured with the initializing uri's hostname as the mount table name, then it will automatically consider the current uri as fallback(`fs.viewfs.mounttable.*mycluster*.linkFallback`) target fs uri. +If the initialized uri contains path part, it will consider only scheme and authority part, but not the path part. Example, if the initialized uri contains `hdfs://mycluster/data`, it will consider only `hdfs://mycluster` as fallback target fs uri. +The path part `data` will be ignored. + +Another important improvement with the ViewFileSystemOverloadScheme is, administrators need not copy the `mount-table.xml` configuration file to 1000s of client nodes. Instead, they can keep the mount-table configuration file in a Hadoop compatible file system. So, keeping the configuration file in a central place makes administrators life easier as they can update mount-table in single place. + +### Enabling View File System Overload Scheme + +To use this class, the following configurations needed to be added in core-site.xml file. + +```xml + + fs..impl + org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme + +``` +Here `` should be same as the uri-scheme configured in fs.defautFS. 
For example, if fs.defaultFS was configured with `hdfs://mycluster`, then the above configuration would look like below:
+
+```xml
+<property>
+  <name>fs.hdfs.impl</name>
+  <value>org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme</value>
+</property>
+```
+
+### Example Configurations
+
+**Example 1:**
+
+If users want some of their existing cluster (`hdfs://cluster`) data to be mounted with hdfs (`hdfs://cluster`) and other object store clusters (`o3fs://bucket1.volume1.omhost/`, `s3a://bucket1/`), the following example configurations show how to add the mount links.
+
+```xml
+<property>
+  <name>fs.viewfs.mounttable.cluster.link./user</name>
+  <value>hdfs://cluster/user</value>
+</property>
+
+<property>
+  <name>fs.viewfs.mounttable.cluster.link./data</name>
+  <value>o3fs://bucket1.volume1/data</value>
+</property>
+
+<property>
+  <name>fs.viewfs.mounttable.cluster.link./backup</name>
+  <value>s3a://bucket1/backup/</value>
+</property>
+```
+
+Let's consider the following operations to understand where they will be delegated based on the mount links.
+
+ *Op1:* Create a file with the path `hdfs://cluster/user/fileA`; physically this file will be created at `hdfs://cluster/user/fileA`. This delegation happens based on the first configuration parameter above: `/user` is mapped to `hdfs://cluster/user/`.
+
+ *Op2:* Create a file with the path `hdfs://cluster/data/datafile`; this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happens based on the second configuration parameter above: `/data` is mapped to `o3fs://bucket1.volume1.omhost/data/`.
+
+ *Op3:* Create a file with the path `hdfs://cluster/backup/data.zip`; physically this file will be created at `s3a://bucket1/backup/data.zip`. This delegation happens based on the third configuration parameter above: `/backup` is mapped to `s3a://bucket1/backup/`.
+
+
+**Example 2:**
+
+If users want some of their existing cluster (`s3a://bucketA/`) data to be mounted with another hdfs cluster (`hdfs://cluster`) and object store clusters (`o3fs://bucket1.volume1.omhost/`, `s3a://bucketA/`), the following example configurations show how to add the mount links.
+
+```xml
+<property>
+  <name>fs.viewfs.mounttable.bucketA.link./user</name>
+  <value>hdfs://cluster/user</value>
+</property>
+
+<property>
+  <name>fs.viewfs.mounttable.bucketA.link./data</name>
+  <value>o3fs://bucket1.volume1.omhost/data</value>
+</property>
+
+<property>
+  <name>fs.viewfs.mounttable.bucketA.link./salesDB</name>
+  <value>s3a://bucketA/salesDB/</value>
+</property>
+```
+
+Let's consider the following operations to understand where they will be delegated based on the mount links.
+
+ *Op1:* Create a file with the path `s3a://bucketA/user/fileA`; this file will be created physically at `hdfs://cluster/user/fileA`. This delegation happens based on the first configuration parameter above: `/user` is mapped to `hdfs://cluster/user`.
+
+ *Op2:* Create a file with the path `s3a://bucketA/data/datafile`; this file will be created at `o3fs://bucket1.volume1.omhost/data/datafile`. This delegation happens based on the second configuration parameter above: `/data` is mapped to `o3fs://bucket1.volume1.omhost/data/`.
+
+ *Op3:* Create a file with the path `s3a://bucketA/salesDB/dbfile`; physically this file will be created at `s3a://bucketA/salesDB/dbfile`. This delegation happens based on the third configuration parameter above: `/salesDB` is mapped to `s3a://bucketA/salesDB`.
+
+Note: In the above examples we used the create operation only, but the same mechanism applies to any other file system APIs here.
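+As a hedged shell sketch of the Example 1 operations above (assuming the mount links are present in the client's core-site.xml and the target clusters/buckets exist; `-touchz` simply creates an empty file):
+
+```bash
+# Clients keep addressing hdfs://cluster while ViewFileSystemOverloadScheme
+# delegates each path to the configured child file system.
+hadoop fs -touchz hdfs://cluster/user/fileA       # lands on hdfs://cluster/user/fileA
+hadoop fs -touchz hdfs://cluster/data/datafile    # lands on o3fs://bucket1.volume1.omhost/data/datafile
+hadoop fs -touchz hdfs://cluster/backup/data.zip  # lands on s3a://bucket1/backup/data.zip
+```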
+ +The following picture shows how the different schemes can be used in ViewFileSystemOverloadScheme compared to the ViewFileSystem. + + + +Note: In ViewFsOverloadScheme, by default the mount links will not be represented as symlinks. The permission bits and isDirectory value will be propagated from the target directory/file. + +### Central Mount Table Configurations + +To enable central mount table configuration, we need to configure `fs.viewfs.mounttable.path` in `core-site.xml` with the value as the Hadoop compatible file system directory/file path, where the `mount-table..xml` file copied. Here versionNumber is an integer number and need to increase the version number and upload new file in same directory. + +The ViewFileSystemOverloadScheme always loads the highest version number `mount-table..xml`. Please don't replace the file with same name. Always increment the version number to take new file picked by newly initializing clients. Why we don't recommend to replace the files is that, some client might have already opened the connections to old mount-table files already and in middle of loading configuration files, and replacing files can make them fail. + +```xml + + fs.viewfs.mounttable.path + hdfs://cluster/config/mount-table-dir + +``` + If you are sure, you will never do updates to mount-table file, you can also configure file path directly like below. If you configure file path, it will not check any highest version number loading. Whatever file configured it will be loaded. However file name format should be same. + +```xml + + fs.viewfs.mounttable.path + hdfs://cluster/config/mount-table-dir/mount-table..xml + +``` +Note: we recommend not to configure mount-links in `core-site.xml` if you configure above valid path. Otherwise both mount links will be mixed and can lead to a confused behavior. + +If you copy the `mount-table..xml`, you may consider having big replication factor depending on your cluster size. So, that file will be available locally to majority of clients as applications(MR/YARN/HBASE..etc) use locality on HDFS when reading `mount-table..xml`. + +DFSAdmin commands with View File System Overload Scheme +------------------------------------------------------- + +Please refer to the [HDFSCommands Guide](./HDFSCommands.html#dfsadmin_with_ViewFsOverloadScheme) + +Accessing paths without authority +--------------------------------- + +Accessing paths like `hdfs:///foo/bar`, `hdfs:/foo/bar` or `viewfs:/foo/bar`, where the authority (cluster name or hostname) of the path is not specified, is very common. +This is especially true when the same code is expected to run on multiple clusters with different names or HDFS Namenodes. + +When `ViewFileSystemOverloadScheme` is used (as described above), and if (a) the scheme of the path being accessed is different from the scheme of the path specified as `fs.defaultFS` +and (b) if the path doesn't have an authority specified, accessing the path can result in an error like `Empty Mount table in config for viewfs://default/`. +For example, when the following configuration is used but a path like `viewfs:/foo/bar` or `viewfs:///foo/bar` is accessed, such an error arises. 
+```xml + + fs.hdfs.impl + org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme + + + + fs.defaultFS + hdfs://cluster/ + +``` + +### Solution +To avoid the above problem, the configuration `fs.viewfs.mounttable.default.name.key` has to be set to the name of the cluster, i.e, the following should be added to `core-site.xml` +```xml + + fs.viewfs.mounttable.default.name.key + cluster + +``` +The string in this configuration `cluster` should match the name of the authority in the value of `fs.defaultFS`. Further, the configuration should have a mount table +configured correctly as in the above examples, i.e., the configurations `fs.viewfs.mounttable.*cluster*.link.` should be set (note the same string +`cluster` is used in these configurations). + +Appendix: A Mount Table Configuration with XInclude +--------------------------------------------------- + +If users have a HTTP server in trusted network and don't need authentication mechanism to it, you can also place your mount-table.xml file in that server and configure + XInclude xml tag with `mount-table.xml` file. + +```xml + + + +``` + +The Apache Hadoop configuration has the capability to read the http urls from XInclude and load into configurations. If you choose this option, please don't configure mount-table configuration items in `core-site.xml` or at `fs.viewfs.mounttable.path`. Please note, Hadoop configuration XInclude does not use SPNego authentication when opening url. So, this will not work if http server where you placed `mount-table.xml` needs authentication. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index 9e1b160d6f91f..203082f067a37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -3142,7 +3142,7 @@ See also: [`CREATESNAPSHOT`](#Create_Snapshot), [`DELETESNAPSHOT`](#Delete_Snaps | Description | A list of source paths. | | Type | String | | Default Value | \ | -| Valid Values | A list of comma seperated absolute FileSystem paths without scheme and authority. | +| Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. | | Syntax | Any string. 
| See also: [`CONCAT`](#Concat_Files) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/RackFaultTolerant.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/RackFaultTolerant.jpg new file mode 100644 index 0000000000000..8c05930389235 Binary files /dev/null and b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/RackFaultTolerant.jpg differ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/ViewFSOverloadScheme.png b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/ViewFSOverloadScheme.png new file mode 100644 index 0000000000000..b5029c1e0a098 Binary files /dev/null and b/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/ViewFSOverloadScheme.png differ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index 90b4f11a66a31..f4c264c5e5d40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -71,8 +71,8 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import java.util.function.Supplier; /** * This class tests if EnhancedByteBufferAccess works correctly. @@ -358,7 +358,7 @@ public void testZeroCopyMmapCache() throws Exception { fsIn.close(); fsIn = fs.open(TEST_PATH); final ShortCircuitCache cache = ClientContext.get( - CONTEXT, conf).getShortCircuitCache(); + CONTEXT, conf).getShortCircuitCache(0); cache.accept(new CountingVisitor(0, 5, 5, 0)); results[0] = fsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS)); @@ -654,12 +654,12 @@ public void testZeroCopyReadOfCachedData() throws Exception { BLOCK_SIZE), byteBufferToArray(result2)); fsIn2.releaseBuffer(result2); fsIn2.close(); - + // check that the replica is anchored final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, TEST_PATH); final ShortCircuitCache cache = ClientContext.get( - CONTEXT, conf).getShortCircuitCache(); + CONTEXT, conf).getShortCircuitCache(0); waitForReplicaAnchorStatus(cache, firstBlock, true, true, 1); // Uncache the replica fs.removeCacheDirective(directiveId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 7027f3bc6da69..f8adf017332d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -25,7 +25,7 @@ import java.util.UUID; import java.util.regex.Pattern; -import com.google.common.collect.Ordering; +import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java index 54f4ed27379df..0efb33f5db200 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java @@ -35,7 +35,7 @@ public class TestHDFSContractMultipartUploader extends AbstractContractMultipartUploaderTest { protected static final Logger LOG = - LoggerFactory.getLogger(AbstractContractMultipartUploaderTest.class); + LoggerFactory.getLogger(TestHDFSContractMultipartUploader.class); @BeforeClass public static void createCluster() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java new file mode 100644 index 0000000000000..9d394c004924e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests that the NN startup is successful with ViewFSOverloadScheme. + */ +public class TestNNStartupWhenViewFSOverloadSchemeEnabled { + private MiniDFSCluster cluster; + private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl"; + private static final String HDFS_SCHEME = "hdfs"; + private static final Configuration CONF = new Configuration(); + + @BeforeClass + public static void setUp() { + CONF.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); + CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); + CONF.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1); + CONF.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + CONF.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME), DistributedFileSystem.class.getName()); + // By default trash interval is 0. To trigger TrashEmptier, let's set it to + // >0 value. + CONF.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 100); + } + + /** + * Tests that the HA mode NameNode startup is successful when + * ViewFSOverloadScheme configured. + */ + @Test(timeout = 30000) + public void testHANameNodeAndDataNodeStartup() throws Exception { + cluster = new MiniDFSCluster.Builder(CONF) + .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1) + .waitSafeMode(false).build(); + cluster.waitActive(); + cluster.transitionToActive(0); + } + + /** + * Tests that the NameNode startup is successful when ViewFSOverloadScheme + * configured. + */ + @Test(timeout = 30000) + public void testNameNodeAndDataNodeStartup() throws Exception { + cluster = + new MiniDFSCluster.Builder(CONF).numDataNodes(1).waitSafeMode(false) + .build(); + cluster.waitActive(); + } + + @After + public void shutdownCluster() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java new file mode 100644 index 0000000000000..5e2f42b77a3a7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.Before; + +/** + * Tests ViewFileSystemOverloadScheme with configured mount links. + */ +public class TestViewFSOverloadSchemeWithMountTableConfigInHDFS + extends TestViewFileSystemOverloadSchemeWithHdfsScheme { + private Path oldVersionMountTablePath; + private Path newVersionMountTablePath; + + @Before + @Override + public void setUp() throws IOException { + super.setUp(); + String mountTableDir = + URI.create(getConf().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)) + .toString() + "/MountTable/"; + getConf().set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH, mountTableDir); + FileSystem fs = new ViewFileSystemOverloadScheme.ChildFsGetter("hdfs") + .getNewInstance(new Path(mountTableDir).toUri(), getConf()); + fs.mkdirs(new Path(mountTableDir)); + String oldVersionMountTable = "mount-table.30.xml"; + String newVersionMountTable = "mount-table.31.xml"; + oldVersionMountTablePath = new Path(mountTableDir, oldVersionMountTable); + newVersionMountTablePath = new Path(mountTableDir, newVersionMountTable); + fs.createNewFile(oldVersionMountTablePath); + fs.createNewFile(newVersionMountTablePath); + } + + /** + * This method saves the mount links in a hdfs file newVersionMountTable. + * Since this file has highest version, this should be loaded by + * ViewFSOverloadScheme. 
+ */ + @Override + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration config) throws IOException, URISyntaxException { + ViewFsTestSetup.addMountLinksToFile(mountTable, sources, targets, + newVersionMountTablePath, getConf()); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java index b8bed1df84a6e..b3836956c79db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java @@ -299,14 +299,16 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey) new URI(uri2.getScheme(), uri2.getAuthority(), "/", null, null) }; + String clusterName = "mycluster"; final Configuration testConf = new Configuration(conf); + testConf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + clusterName); testConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1); final String testString = "Hello Nfly!"; final Path nflyRoot = new Path("/nflyroot"); - ConfigUtil.addLinkNfly(testConf, - Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, + clusterName, nflyRoot.toString(), "minReplication=2," + repairKey + "=true", testUris); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java index 5fb7c3b07f41f..e7317608147be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.fs.viewfs; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -26,15 +29,18 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.HashSet; import javax.security.auth.login.LoginException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; @@ -66,7 +72,7 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest { "/tmp/TestViewFileSystemLinkFallback"; private final static Logger LOG = LoggerFactory.getLogger( TestViewFileSystemLinkFallback.class); - + private static URI viewFsDefaultClusterUri; @Override protected FileSystemTestHelper createFileSystemHelper() { @@ -90,6 +96,8 @@ public static void clusterSetupAtBeginning() throws IOException, FS_HDFS[i] = cluster.getFileSystem(i); } fsDefault = FS_HDFS[FS_INDEX_DEFAULT]; + viewFsDefaultClusterUri = new URI(FsConstants.VIEWFS_SCHEME, + 
Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); } @AfterClass @@ -261,4 +269,651 @@ public void testConfLinkFallbackWithMountPoint() throws Exception { e.getMessage().contains(expectedErrorMsg)); } } + + /** + * This tests whether the fallback link gets listed for list operation + * of root directory of mount table. + * @throws Exception + */ + @Test + public void testListingWithFallbackLink() throws Exception { + Path dir1 = new Path(targetTestRoot, "fallbackDir/dir1"); + fsTarget.mkdirs(dir1); + String clusterName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; + URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, clusterName, + "/", null, null); + + HashSet beforeFallback = new HashSet<>(); + try(FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + for (FileStatus stat : vfs.listStatus(new Path(viewFsUri.toString()))) { + beforeFallback.add(stat.getPath()); + } + } + + ConfigUtil.addLinkFallback(conf, clusterName, + new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs.listStatus(new Path(viewFsUri.toString()))) { + afterFallback.add(stat.getPath()); + } + afterFallback.removeAll(beforeFallback); + assertTrue("Listing didn't include fallback link", + afterFallback.size() == 1); + Path[] fallbackArray = new Path[afterFallback.size()]; + afterFallback.toArray(fallbackArray); + Path expected = new Path(viewFsUri.toString(), "dir1"); + assertEquals("Path did not match", + expected, fallbackArray[0]); + + // Create a directory using the returned fallback path and verify + Path childDir = new Path(fallbackArray[0], "child"); + vfs.mkdirs(childDir); + FileStatus status = fsTarget.getFileStatus(new Path(dir1, "child")); + assertTrue(status.isDirectory()); + assertTrue(vfs.getFileStatus(childDir).isDirectory()); + } + } + + /** + * This tests whether fallback directory gets shaded during list operation + * of root directory of mount table when the same directory name exists as + * mount point as well as in the fallback linked directory. + * @throws Exception + */ + @Test + public void testListingWithFallbackLinkWithSameMountDirectories() + throws Exception { + // Creating two directories under the fallback directory. + // "user" directory already exists as configured mount point. 
+ Path dir1 = new Path(targetTestRoot, "fallbackDir/user"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2); + String clusterName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs + .listStatus(new Path(viewFsDefaultClusterUri.toString()))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil.addLinkFallback(conf, clusterName, + new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs + .listStatus(new Path(viewFsDefaultClusterUri.toString()))) { + afterFallback.add(stat.getPath()); + } + afterFallback.removeAll(beforeFallback); + assertEquals("The same directory name in fallback link should be shaded", + 1, afterFallback.size()); + Path[] fallbackArray = new Path[afterFallback.size()]; + // Only user1 should be listed as fallback link + Path expected = new Path(viewFsDefaultClusterUri.toString(), "user1"); + assertEquals("Path did not match", + expected, afterFallback.toArray(fallbackArray)[0]); + + // Create a directory using the returned fallback path and verify + Path childDir = new Path(fallbackArray[0], "child"); + vfs.mkdirs(childDir); + FileStatus status = fsTarget.getFileStatus(new Path(dir2, "child")); + assertTrue(status.isDirectory()); + assertTrue(vfs.getFileStatus(childDir).isDirectory()); + } + } + + /** + * Tests ListStatus on non-link parent with fallback configured. + * =============================Example.====================================== + * ===== Fallback path tree =============== Mount Path Tree ================== + * =========================================================================== + * * / ***** / ***************** + * * / ***** / ***************** + * * user1 ***** user1 ***************** + * * / ***** / ***************** + * * hive ***** hive ***************** + * * / \ ***** / ***************** + * * warehouse warehouse1 ***** warehouse ***************** + * * (-rwxr--r--) ***** (-r-xr--r--) ***************** + * * / ***** / ***************** + * * partition-0 ***** partition-0 ***************** + * =========================================================================== + * =========================================================================== + * *** ls /user1/hive ********* + * *** viewfs://default/user1/hive/warehouse (-rwxr--r--) ********* + * *** viewfs://default/user1/hive/warehouse1 ********* + * =========================================================================== + */ + @Test + public void testListingWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + // Creating multiple directories path under the fallback directory. + // "/user1/hive/warehouse/partition-0" directory already exists as + // configured mount point. 
+ Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2); + fsTarget.setPermission(new Path(targetTestRoot, "fallbackDir/user1/hive/"), + FsPermission.valueOf("-rwxr--r--")); + + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), "/user1/hive/"))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), "/user1/hive/"))) { + afterFallback.add(stat.getPath()); + if (dir1.getName().equals(stat.getPath().getName())) { + // make sure fallback dir listed out with correct permissions, but not + // with link permissions. + assertEquals(FsPermission.valueOf("-rwxr--r--"), + stat.getPermission()); + } + } + //viewfs://default/user1/hive/warehouse + afterFallback.removeAll(beforeFallback); + assertEquals("The same directory name in fallback link should be shaded", + 1, afterFallback.size()); + } + } + + /** + * Tests ListStatus on link parent with fallback configured. + * =============================Example.====================================== + * ===== Fallback path tree =============== Mount Path Tree ================== + * =========================================================================== + * * / ***** / ********** + * * / ***** / ********** + * * user1 ***** user1 ********** + * * / ***** / ********** + * * hive ***** hive ********** + * * / \ ***** / ********** + * * warehouse warehouse1 ***** warehouse ********** + * * (-rwxr--r--) ***** (-r-xr--r--) ********** + * * / ***** / ********** + * * partition-0 ***** partition-0 ---> targetTestRoot ********** + * * ***** (-r-xr--r--) (-rwxr--rw-) ********** + * =========================================================================== + * =========================================================================== + * *** ls /user1/hive/warehouse ** + * *** viewfs://default/user1/hive/warehouse/partition-0 (-rwxr--rw-) ** + * =========================================================================== + */ + @Test + public void testLSOnLinkParentWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + // Creating multiple directories path under the fallback directory. + // "/user1/hive/warehouse/partition-0" directory already exists as + // configured mount point. 
+ Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2); + fsTarget.setPermission(new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"), + FsPermission.valueOf("-rwxr--r--")); + fsTarget.setPermission(targetTestRoot, FsPermission.valueOf("-rwxr--rw-")); + + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), + "/user1/hive/warehouse/"))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), + "/user1/hive/warehouse/"))) { + afterFallback.add(stat.getPath()); + if (dir1.getName().equals(stat.getPath().getName())) { + // make sure fallback dir listed out with correct permissions, but not + // with link permissions. + assertEquals(FsPermission.valueOf("-rwxr--rw-"), + stat.getPermission()); + } + } + afterFallback.removeAll(beforeFallback); + assertEquals("Just to make sure paths are same.", 0, + afterFallback.size()); + } + } + + /** + * Tests ListStatus on root with fallback configured. + * =============================Example.====================================== + * ===== Fallback path tree =============== Mount Path Tree ================== + * =========================================================================== + * * / / ***** / *** + * * / / ***** / *** + * * user1 user2 ***** user1 ---> targetTestRoot *** + * *(-r-xr--r--) (-r-xr--r--) ***** (-rwxr--rw-) *** + * =========================================================================== + * =========================================================================== + * *** ls /user1/hive/warehouse ** + * *** viewfs://default/user1(-rwxr--rw-) ** + * *** viewfs://default/user2(-r-xr--r--) ** + * =========================================================================== + */ + @Test + public void testLSOnRootWithFallbackLinkWithSameMountDirectories() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil + .addLink(conf, "/user1", new Path(targetTestRoot.toString()).toUri()); + // Creating multiple directories path under the fallback directory. + // "/user1" directory already exists as configured mount point. 
+ Path dir1 = new Path(targetTestRoot, "fallbackDir/user1"); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user2"); + fsTarget.mkdirs(dir1); + fsTarget.mkdirs(dir2, FsPermission.valueOf("-rwxr--r--")); + fsTarget.setPermission(targetTestRoot, FsPermission.valueOf("-rwxr--rw-")); + + HashSet beforeFallback = new HashSet<>(); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs + .listStatus(new Path(viewFsDefaultClusterUri.toString(), "/"))) { + beforeFallback.add(stat.getPath()); + } + } + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + HashSet afterFallback = new HashSet<>(); + for (FileStatus stat : vfs + .listStatus(new Path(viewFsDefaultClusterUri.toString(), "/"))) { + afterFallback.add(stat.getPath()); + if (dir1.getName().equals(stat.getPath().getName())) { + // make sure fallback dir listed out with correct permissions, but not + // with link permissions. + assertEquals(FsPermission.valueOf("-rwxr--rw-"), + stat.getPermission()); + } else { + assertEquals("Path is: " + stat.getPath(), + FsPermission.valueOf("-rwxr--r--"), stat.getPermission()); + } + } + afterFallback.removeAll(beforeFallback); + assertEquals(1, afterFallback.size()); + assertEquals("/user2 dir from fallback should be listed.", "user2", + afterFallback.iterator().next().getName()); + } + } + + @Test + public void testLSOnLinkParentWhereMountLinkMatchesWithAFileUnderFallback() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, true); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/part-0", + new Path(targetTestRoot.toString()).toUri()); + // Create a file path in fallback matching to the path of mount link. + Path file1 = + new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse/part-0"); + fsTarget.createNewFile(file1); + Path dir2 = new Path(targetTestRoot, "fallbackDir/user1/hive/warehouse1"); + fsTarget.mkdirs(dir2); + + ConfigUtil + .addLinkFallback(conf, new Path(targetTestRoot, "fallbackDir").toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + for (FileStatus stat : vfs.listStatus( + new Path(viewFsDefaultClusterUri.toString(), + "/user1/hive/warehouse/"))) { + if (file1.getName().equals(stat.getPath().getName())) { + // Link represents as symlink. + assertFalse(stat.isFile()); + assertFalse(stat.isDirectory()); + assertTrue(stat.isSymlink()); + Path fileUnderDir = new Path(stat.getPath(), "check"); + assertTrue(vfs.mkdirs(fileUnderDir)); // Creating dir under target + assertTrue(fsTarget + .exists(new Path(targetTestRoot, fileUnderDir.getName()))); + } + } + } + } + + /** + * Tests that directory making should be successful when the parent directory + * is same as the existent fallback directory. The new dir should be created + * in fallback instead failing. 
+ */ + @Test + public void testMkdirsOfLinkParentWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path p = new Path("/user1/hive/warehouse/test"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(p)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that directory making should be successful when attempting to create + * the root directory as it's already exist. + */ + @Test + public void testMkdirsOfRootWithFallbackLinkAndMountWithSameDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil + .addLink(conf, "/user1", new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, "fallbackDir/user1"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path p = new Path("/"); + Path test = Path.mergePaths(fallbackTarget, p); + assertTrue(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(p)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory under the root. + */ + @Test + public void testMkdirsOfNewDirWithOutMatchingToMountOrFallbackDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + // user2 does not exist in fallback + Path p = new Path("/user2"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(p)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that when the parent dirs does not exist in fallback but the parent + * dir is same as mount internal directory, then we create parent structure + * (mount internal directory tree structure) in fallback. 
+ */ + @Test + public void testMkdirsWithFallbackLinkWithMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + //user1 does not exist in fallback + Path immediateLevelToInternalDir = new Path("/user1/test"); + Path test = Path.mergePaths(fallbackTarget, immediateLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(immediateLevelToInternalDir)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that when the parent dirs does not exist in fallback but the + * immediate parent dir is not same as mount internal directory, then we + * create parent structure (mount internal directory tree structure) in + * fallback. + */ + @Test + public void testMkdirsOfDeepTreeWithFallbackLinkAndMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + //user1 does not exist in fallback + Path multipleLevelToInternalDir = new Path("/user1/test/test"); + Path test = Path.mergePaths(fallbackTarget, multipleLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + assertTrue(vfs.mkdirs(multipleLevelToInternalDir)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that mkdirs should return false when there is a problem with + * fallbackfs. + */ + @Test + public void testMkdirsShouldReturnFalseWhenFallbackFSNotAvailable() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/test", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + //user1/test1 does not exist in fallback + Path nextLevelToInternalDir = new Path("/user1/test1"); + Path test = Path.mergePaths(fallbackTarget, nextLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + // user1 exists in viewFS mount. + assertNotNull(vfs.getFileStatus(new Path("/user1"))); + // user1 does not exists in fallback. + assertFalse(fsTarget.exists(test.getParent())); + cluster.shutdownNameNodes(); // Stopping fallback server + // /user1/test1 does not exist in mount internal dir tree, it would + // attempt to create in fallback. + intercept(IOException.class, () -> { + vfs.mkdirs(nextLevelToInternalDir); + }); + cluster.restartNameNodes(); + // should return true succeed when fallback fs is back to normal. 
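+      // Illustrative sketch only (not part of the assertions): a caller that
+      // wants to tolerate a temporarily unavailable fallback could guard the
+      // call along these lines, assuming IOException signals the outage:
+      //   boolean created;
+      //   try {
+      //     created = vfs.mkdirs(dir);
+      //   } catch (IOException e) {
+      //     created = false; // fallback filesystem unreachable
+      //   }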
+ assertTrue(vfs.mkdirs(nextLevelToInternalDir)); + assertTrue(fsTarget.exists(test)); + } + } + + /** + * Tests that the create file should be successful when the parent directory + * is same as the existent fallback directory. The new file should be created + * in fallback. + */ + @Test + public void testCreateFileOnInternalMountDirWithSameDirTreeExistInFallback() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path dir1 = new Path(fallbackTarget, "user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/user1/hive/warehouse/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + assertTrue(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile).close(); + assertTrue(fsTarget.exists(testFileInFallback)); + } + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory. + */ + @Test + public void testCreateNewFileWithOutMatchingToMountDirOrFallbackDirPath() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/user2/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + // user2 does not exist in fallback + assertFalse(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile).close(); + // /user2/test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + } + } + + /** + * Tests the making of a new file on root which is not matching to any of + * fallback files on root. + */ + @Test + public void testCreateFileOnRootWithFallbackEnabled() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + vfs.create(vfsTestFile).close(); + // /test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + } + } + + /** + * Tests the create of a file on root where the path is matching to an + * existing file on fallback's file on root. 
+ */ + @Test (expected = FileAlreadyExistsException.class) + public void testCreateFileOnRootWithFallbackWithFileAlreadyExist() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path testFile = new Path(fallbackTarget, "test.file"); + // pre-creating test file in fallback. + fsTarget.create(testFile).close(); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestFile = new Path("/test.file"); + assertTrue(fsTarget.exists(testFile)); + vfs.create(vfsTestFile, false).close(); + } + } + + /** + * Tests the creating of a file where the path is same as mount link path. + */ + @Test(expected= FileAlreadyExistsException.class) + public void testCreateFileWhereThePathIsSameAsItsMountLinkPath() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestDir = new Path("/user1/hive"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + vfs.create(vfsTestDir).close(); + } + } + + /** + * Tests the create of a file where the path is same as one of of the internal + * dir path should fail. + */ + @Test + public void testCreateFileSameAsInternalDirPath() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (FileSystem vfs = FileSystem.get(viewFsDefaultClusterUri, conf)) { + Path vfsTestDir = new Path("/user1"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + try { + vfs.create(vfsTestDir); + Assert.fail("Should fail to create file as this is an internal dir."); + } catch (NotInMountpointException e){ + // This tree is part of internal tree. The above exception will be + // thrown from getDefaultReplication, getDefaultBlockSize APIs which was + // called in create API. + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java new file mode 100644 index 0000000000000..d3afa47f7554b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java @@ -0,0 +1,462 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.fs.viewfs.RegexMountPoint.INTERCEPTOR_INTERNAL_SEP; +import static org.junit.Assert.assertSame; + +/** + * Test linkRegex node type for view file system. + */ +public class TestViewFileSystemLinkRegex extends ViewFileSystemBaseTest { + public static final Logger LOGGER = + LoggerFactory.getLogger(TestViewFileSystemLinkRegex.class); + + private static FileSystem fsDefault; + private static MiniDFSCluster cluster; + private static Configuration clusterConfig; + private static final int NAME_SPACES_COUNT = 3; + private static final int DATA_NODES_COUNT = 3; + private static final int FS_INDEX_DEFAULT = 0; + private static final FileSystem[] FS_HDFS = new FileSystem[NAME_SPACES_COUNT]; + private static final String CLUSTER_NAME = + "TestViewFileSystemLinkRegexCluster"; + private static final File TEST_DIR = GenericTestUtils + .getTestDir(TestViewFileSystemLinkRegex.class.getSimpleName()); + private static final String TEST_BASE_PATH = + "/tmp/TestViewFileSystemLinkRegex"; + + @Override + protected FileSystemTestHelper createFileSystemHelper() { + return new FileSystemTestHelper(TEST_BASE_PATH); + } + + @BeforeClass + public static void clusterSetupAtBeginning() throws IOException { + SupportsBlocks = true; + clusterConfig = ViewFileSystemTestSetup.createConfig(); + clusterConfig.setBoolean( + DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, + true); + cluster = new MiniDFSCluster.Builder(clusterConfig).nnTopology( + MiniDFSNNTopology.simpleFederatedTopology(NAME_SPACES_COUNT)) + .numDataNodes(DATA_NODES_COUNT).build(); + cluster.waitClusterUp(); + + for (int i = 0; i < NAME_SPACES_COUNT; i++) { + FS_HDFS[i] = cluster.getFileSystem(i); + } + fsDefault = FS_HDFS[FS_INDEX_DEFAULT]; + } + + @AfterClass + public static void clusterShutdownAtEnd() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Override + @Before + public void setUp() throws Exception { + fsTarget = fsDefault; + super.setUp(); + } + + /** + * Override this so that we don't set the targetTestRoot to any path under the + * root of the FS, and so that we don't try to delete the test dir, but rather + * only its contents. 
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fsDefault.makeQualified(new Path("/"));
+    for (FileStatus status : fsDefault.listStatus(targetTestRoot)) {
+      fsDefault.delete(status.getPath(), true);
+    }
+  }
+
+  @Override
+  void setupMountPoints() {
+    super.setupMountPoints();
+  }
+
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 1; // all point to the same fs so 1 unique token
+  }
+
+  @Override
+  int getExpectedDelegationTokenCountWithCredentials() {
+    return 1;
+  }
+
+  public String buildReplaceInterceptorSettingString(String srcRegex,
+      String replaceString) {
+    return
+        RegexMountPointInterceptorType.REPLACE_RESOLVED_DST_PATH.getConfigName()
+            + INTERCEPTOR_INTERNAL_SEP + srcRegex + INTERCEPTOR_INTERNAL_SEP
+            + replaceString;
+  }
+
+  public String linkInterceptorSettings(
+      List<String> interceptorSettingStrList) {
+    StringBuilder stringBuilder = new StringBuilder();
+    int listSize = interceptorSettingStrList.size();
+    for (int i = 0; i < listSize; ++i) {
+      stringBuilder.append(interceptorSettingStrList.get(i));
+      if (i < listSize - 1) {
+        stringBuilder.append(RegexMountPoint.INTERCEPTOR_SEP);
+      }
+    }
+    return stringBuilder.toString();
+  }
+
+  private void createDirWithChildren(
+      FileSystem fileSystem, Path dir, List<Path> childrenFiles)
+      throws IOException {
+    Assert.assertTrue(fileSystem.mkdirs(dir));
+    int index = 0;
+    for (Path childFile : childrenFiles) {
+      createFile(fileSystem, childFile, index, true);
+    }
+  }
+
+  private void createFile(
+      FileSystem fileSystem, Path file, int dataLenToWrite, boolean overwrite)
+      throws IOException {
+    FSDataOutputStream outputStream = null;
+    try {
+      outputStream = fileSystem.create(file, overwrite);
+      for (int i = 0; i < dataLenToWrite; ++i) {
+        outputStream.writeByte(i);
+      }
+      outputStream.close();
+    } finally {
+      if (outputStream != null) {
+        outputStream.close();
+      }
+    }
+  }
+
+  private void createDirWithChildren(
+      FileSystem fileSystem, Path dir, int childrenFilesCnt)
+      throws IOException {
+    List<Path> childrenFiles = new ArrayList<>(childrenFilesCnt);
+    for (int i = 0; i < childrenFilesCnt; ++i) {
+      childrenFiles.add(new Path(dir, "file" + i));
+    }
+    createDirWithChildren(fileSystem, dir, childrenFiles);
+  }
+
+  /**
+   * The function used to test regex mountpoints.
+   * @param config - get mountable config from this conf
+   * @param regexStr - the src path regex expression that applies to this config
+   * @param dstPathStr - the string of target path
+   * @param interceptorSettings - the serialized interceptor string to be
+   *                              applied while resolving the mapping
+   * @param dirPathBeforeMountPoint - the src path user passed in to be mapped.
+   * @param expectedResolveResult - the expected path after resolve
+   *                                dirPathBeforeMountPoint via regex mountpoint.
+ * @param childrenFilesCnt - the child files under dirPathBeforeMountPoint to + * be created + * @throws IOException + * @throws URISyntaxException + */ + private void testRegexMountpoint( + Configuration config, + String regexStr, + String dstPathStr, + String interceptorSettings, + Path dirPathBeforeMountPoint, + Path expectedResolveResult, + int childrenFilesCnt) + throws IOException, URISyntaxException { + // Set up test env + createDirWithChildren( + fsTarget, expectedResolveResult, childrenFilesCnt); + ConfigUtil.addLinkRegex( + config, CLUSTER_NAME, regexStr, dstPathStr, interceptorSettings); + // Asserts + URI viewFsUri = new URI( + FsConstants.VIEWFS_SCHEME, CLUSTER_NAME, "/", null, null); + try (FileSystem vfs = FileSystem.get(viewFsUri, config)) { + Assert.assertEquals(expectedResolveResult.toString(), + vfs.resolvePath(dirPathBeforeMountPoint).toString()); + Assert.assertTrue( + vfs.getFileStatus(dirPathBeforeMountPoint).isDirectory()); + Assert.assertEquals( + childrenFilesCnt, vfs.listStatus(dirPathBeforeMountPoint).length); + + // Test Inner cache, the resolved result's filesystem should be the same. + ViewFileSystem viewFileSystem = (ViewFileSystem) vfs; + ChRootedFileSystem target1 = (ChRootedFileSystem) viewFileSystem.fsState + .resolve(viewFileSystem.getUriPath(dirPathBeforeMountPoint), true) + .targetFileSystem; + ChRootedFileSystem target2 = (ChRootedFileSystem) viewFileSystem.fsState + .resolve(viewFileSystem.getUriPath(dirPathBeforeMountPoint), true) + .targetFileSystem; + assertSame(target1.getMyFs(), target2.getMyFs()); + } + } + /** + * Test regex mount points which use capture group index for mapping. + * + * @throws Exception + */ + @Test + public void testConfLinkRegexIndexMapping() throws Exception { + // Config: + // + // + // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster + // .linkRegex.^/(\w+) + // /targetTestRoot/$1 + // + // Dir path to test: /testConfLinkRegexIndexMapping1 + // Expect path: /targetTestRoot/testConfLinkRegexIndexMapping1 + String regexStr = "^/(\\w+)"; + String dstPathStr = targetTestRoot + "$1"; + Path srcPath = new Path("/testConfLinkRegexIndexMapping1"); + Path expectedResolveResult = new Path(dstPathStr.replace( + "$1", "testConfLinkRegexIndexMapping1")); + testRegexMountpoint( + new Configuration(conf), + regexStr, dstPathStr, null, + srcPath, expectedResolveResult, 3); + + // Config: + // + // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster + // .linkRegex.^/(\w+) + // /targetTestRoot/${1} + // + // Dir path to test: /testConfLinkRegexIndexMapping2 + // Expect path: /targetTestRoot/testConfLinkRegexIndexMapping2 + + dstPathStr = targetTestRoot + "${1}"; + srcPath = new Path("/testConfLinkRegexIndexMapping2"); + expectedResolveResult = + new Path( + dstPathStr.replace("${1}", "testConfLinkRegexIndexMapping2")); + testRegexMountpoint( + new Configuration(conf), + regexStr, dstPathStr, null, + srcPath, expectedResolveResult, 4); + + // Config: + // + // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster + // .linkRegex.^/(\w+) + // /targetTestRoot/$1 + // + // Dir path to test: /testConfLinkRegexIndexMapping3/dir1 + // Expect path: /targetTestRoot/testConfLinkRegexIndexMapping3/dir1 + dstPathStr = targetTestRoot + "$1"; + srcPath = new Path("/testConfLinkRegexIndexMapping3/dir1"); + expectedResolveResult = new Path( + dstPathStr.replace("$1", "testConfLinkRegexIndexMapping3/dir1")); + testRegexMountpoint( + new Configuration(conf), + regexStr, dstPathStr, null, + srcPath, expectedResolveResult, 5); + + // 
Config:
+    //
+    // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster
+    // .linkRegex.^/(\w+)
+    // /targetTestRoot/${1}/
+    //
+    // Dir path to test: /testConfLinkRegexIndexMapping4/dir1
+    // Expect path: /targetTestRoot/testConfLinkRegexIndexMapping4/dir1
+    dstPathStr = targetTestRoot + "${1}/";
+    srcPath = new Path("/testConfLinkRegexIndexMapping4/dir1");
+    expectedResolveResult = new Path(
+        dstPathStr.replace("${1}", "testConfLinkRegexIndexMapping4/dir1"));
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, null,
+        srcPath, expectedResolveResult, 6);
+  }
+
+  /**
+   * Test regex mount points with named capture group.
+   * @throws Exception
+   */
+  @Test
+  public void testConfLinkRegexNamedGroupMapping() throws Exception {
+    // Config:
+    //
+    // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster
+    // .linkRegex.^/(?<firstDir>\w+)
+    // /targetTestRoot/$firstDir
+    //
+    // Dir path to test: /testConfLinkRegexNamedGroupMapping1
+    // Expect path: /targetTestRoot/testConfLinkRegexNamedGroupMapping1
+    URI viewFsUri = new URI(
+        FsConstants.VIEWFS_SCHEME, CLUSTER_NAME, "/", null, null);
+    String regexStr = "^/(?<firstDir>\\w+)";
+    String dstPathStr = targetTestRoot + "$firstDir";
+    Path srcPath = new Path("/testConfLinkRegexNamedGroupMapping1");
+    Path expectedResolveResult = new Path(
+        dstPathStr.replace("$firstDir", "testConfLinkRegexNamedGroupMapping1"));
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, null,
+        srcPath, expectedResolveResult, 3);
+
+    // Config:
+    //
+    // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster
+    // .linkRegex.^/(?<firstDir>\w+)
+    // /targetTestRoot/${firstDir}
+    //
+    // Dir path to test: /testConfLinkRegexNamedGroupMapping2
+    // Expect path: /targetTestRoot/testConfLinkRegexNamedGroupMapping2
+    dstPathStr = targetTestRoot + "${firstDir}";
+    srcPath = new Path("/testConfLinkRegexNamedGroupMapping2");
+    expectedResolveResult = new Path(
+        dstPathStr.replace(
+            "${firstDir}", "testConfLinkRegexNamedGroupMapping2"));
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, null,
+        srcPath, expectedResolveResult, 5);
+  }
+
+  /**
+   * Test cases where the destination is a fixed path.
+   * @throws Exception
+   */
+  @Test
+  public void testConfLinkRegexFixedDestMapping() throws Exception {
+    // Config:
+    //
+    // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster
+    // .linkRegex.^/(?<firstDir>\w+)
+    // /targetTestRoot/${firstDir}
+    //
+    // Dir path to test: /misc1
+    // Expect path: /targetTestRoot/testConfLinkRegexFixedDestMappingFile
+    // Dir path to test: /misc2
+    // Expect path: /targetTestRoot/testConfLinkRegexFixedDestMappingFile
+    String regexStr = "^/\\w+";
+    String dstPathStr =
+        targetTestRoot + "testConfLinkRegexFixedDestMappingFile";
+    Path expectedResolveResult = new Path(dstPathStr);
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, null,
+        new Path("/misc1"), expectedResolveResult, 5);
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, null,
+        new Path("/misc2"), expectedResolveResult, 6);
+  }
+
+  /**
+   * Test regex mount point config with a single interceptor.
+   *
+   */
+  @Test
+  public void testConfLinkRegexWithSingleInterceptor() throws Exception {
+    // Config:
+    //
+    // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster
+    // .linkRegex.replaceresolveddstpath:_:-#.^/user/(?<username>\w+)
+    // /targetTestRoot/$username
+    //
+    // Dir path to test: /user/hadoop_user1/hadoop_dir1
+    // Expect path: /targetTestRoot/hadoop-user1/hadoop_dir1
+
+    String regexStr = "^/user/(?<username>\\w+)";
+    String dstPathStr = targetTestRoot + "$username";
+    // Replace "_" with "-"
+    String settingString = buildReplaceInterceptorSettingString("_", "-");
+    Path srcPath = new Path("/user/hadoop_user1/hadoop_dir1");
+    Path expectedResolveResult = new Path(
+        targetTestRoot, "hadoop-user1/hadoop_dir1");
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, settingString,
+        srcPath, expectedResolveResult, 2);
+  }
+
+  /**
+   * Test regex mount point config with multiple interceptors.
+   *
+   */
+  @Test
+  public void testConfLinkRegexWithInterceptors() throws Exception {
+    // Config:
+    //
+    // fs.viewfs.mounttable.TestViewFileSystemLinkRegexCluster
+    // .linkRegex
+    // .replaceresolveddstpath:_:-;
+    // replaceresolveddstpath:hadoop:hdfs#.^/user/(?<username>\w+)
+    // /targetTestRoot/$username
+    //
+    // Dir path to test: /user/hadoop_user1/hadoop_dir1
+    // Expect path: /targetTestRoot/hdfs-user1/hadoop_dir1
+    String regexStr = "^/user/(?<username>\\w+)/";
+    String dstPathStr = targetTestRoot + "$username";
+    // Replace "_" with "-"
+    String interceptor1 = buildReplaceInterceptorSettingString("_", "-");
+    // Replace "hadoop" with "hdfs"
+    String interceptor2 =
+        buildReplaceInterceptorSettingString("hadoop", "hdfs");
+    String interceptors =
+        linkInterceptorSettings(Arrays.asList(interceptor1, interceptor2));
+    Path srcPath = new Path("/user/hadoop_user1/hadoop_dir1");
+    Path expectedResolveResult =
+        new Path(targetTestRoot, "hdfs-user1/hadoop_dir1");
+    testRegexMountpoint(
+        new Configuration(conf),
+        regexStr, dstPathStr, interceptors,
+        srcPath, expectedResolveResult, 2);
+
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
new file mode 100644
index 0000000000000..dcfa051c3902d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.fs.viewfs; + +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; +import static org.junit.Assume.assumeTrue; + +import java.io.File; +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemContractBaseTest; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.AppendTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestHDFSFileSystemContract; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +/** + * Tests ViewFileSystemOverloadScheme with file system contract tests. + */ +public class TestViewFileSystemOverloadSchemeHdfsFileSystemContract + extends TestHDFSFileSystemContract { + + private static MiniDFSCluster cluster; + private static String defaultWorkingDirectory; + private static Configuration conf = new HdfsConfiguration(); + + @BeforeClass + public static void init() throws IOException { + final File basedir = GenericTestUtils.getRandomizedTestDir(); + conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, + FileSystemContractBaseTest.TEST_UMASK); + cluster = new MiniDFSCluster.Builder(conf, basedir) + .numDataNodes(2) + .build(); + defaultWorkingDirectory = + "/user/" + UserGroupInformation.getCurrentUser().getShortUserName(); + } + + @Before + public void setUp() throws Exception { + conf.set(String.format("fs.%s.impl", "hdfs"), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + "hdfs"), + DistributedFileSystem.class.getName()); + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT); + URI defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + ConfigUtil.addLink(conf, defaultFSURI.getAuthority(), "/user", + defaultFSURI); + ConfigUtil.addLink(conf, defaultFSURI.getAuthority(), "/append", + defaultFSURI); + ConfigUtil.addLink(conf, defaultFSURI.getAuthority(), + "/FileSystemContractBaseTest/", + new URI(defaultFSURI.toString() + "/FileSystemContractBaseTest/")); + fs = FileSystem.get(conf); + } + + @AfterClass + public static void tearDownAfter() throws Exception { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Override + protected String getDefaultWorkingDirectory() { + return defaultWorkingDirectory; + } + + @Override + @Test + public void testAppend() throws IOException { + AppendTestUtil.testAppend(fs, new Path("/append/f")); + } + + @Override + @Test(expected = AccessControlException.class) + public void testRenameRootDirForbidden() throws Exception { + super.testRenameRootDirForbidden(); + } + + @Override + @Test + public void testListStatusRootDir() throws Throwable { + assumeTrue(rootDirTestEnabled()); + Path dir = path("/"); + Path child = 
path("/FileSystemContractBaseTest"); + try (FileSystem dfs = ((ViewFileSystemOverloadScheme) fs).getRawFileSystem( + new Path(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY), "/"), + conf)) { + dfs.mkdirs(child); + } + assertListStatusFinds(dir, child); + } + + @Override + @Ignore // This test same as above in this case. + public void testLSRootDir() throws Throwable { + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java new file mode 100644 index 0000000000000..9a858e17ebe4c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java @@ -0,0 +1,715 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RawLocalFileSystem; +import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.test.PathUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; +import static org.junit.Assert.*; + + +/** + * Tests ViewFileSystemOverloadScheme with configured mount links. 
+ */ +public class TestViewFileSystemOverloadSchemeWithHdfsScheme { + private static final String TEST_STRING = "Hello ViewFSOverloadedScheme!"; + private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl"; + private static final String HDFS_SCHEME = "hdfs"; + private Configuration conf = null; + private static MiniDFSCluster cluster = null; + private URI defaultFSURI; + private File localTargetDir; + private static final String TEST_ROOT_DIR = PathUtils + .getTestDirName(TestViewFileSystemOverloadSchemeWithHdfsScheme.class); + private static final String HDFS_USER_FOLDER = "/HDFSUser"; + private static final String LOCAL_FOLDER = "/local"; + + @BeforeClass + public static void init() throws IOException { + cluster = + new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build(); + cluster.waitClusterUp(); + } + + /** + * Sets up the configurations and starts the MiniDFSCluster. + */ + @Before + public void setUp() throws IOException { + Configuration config = getNewConf(); + config.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1); + config.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + config.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT); + setConf(config); + defaultFSURI = + URI.create(config.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + localTargetDir = new File(TEST_ROOT_DIR, "/root/"); + localTargetDir.mkdirs(); + Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme. + } + + @After + public void cleanUp() throws IOException { + if (cluster != null) { + FileSystem fs = new DistributedFileSystem(); + fs.initialize(defaultFSURI, conf); + try { + FileStatus[] statuses = fs.listStatus(new Path("/")); + for (FileStatus st : statuses) { + Assert.assertTrue(fs.delete(st.getPath(), true)); + } + } finally { + fs.close(); + } + FileSystem.closeAll(); + } + } + + @AfterClass + public static void tearDown() throws IOException { + if (cluster != null) { + FileSystem.closeAll(); + cluster.shutdown(); + } + } + + /** + * Adds the given mount links to config. sources contains mount link src and + * the respective index location in targets contains the target uri. + */ + void addMountLinks(String mountTable, String[] sources, String[] targets, + Configuration config) throws IOException, URISyntaxException { + ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config); + } + + /** + * Create mount links as follows. 
+ * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * + * create file /HDFSUser/testfile should create in hdfs + * create file /local/test should create directory in local fs + */ + @Test(timeout = 30000) + public void testMountLinkWithLocalAndHDFS() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + final Path localTragetPath = new Path(localTargetDir.toURI()); + + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + + // /HDFSUser/testfile + Path hdfsFile = new Path(HDFS_USER_FOLDER + "/testfile"); + // /local/test + Path localDir = new Path(LOCAL_FOLDER + "/test"); + + try (FileSystem fs + = FileSystem.get(conf)) { + Assert.assertEquals(2, fs.getChildFileSystems().length); + fs.createNewFile(hdfsFile); // /HDFSUser/testfile + fs.mkdirs(localDir); // /local/test + } + + // Initialize HDFS and test files exist in ls or not + try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(defaultFSURI, conf); + Assert.assertTrue(dfs.exists( + new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath), + hdfsFile.getName()))); // should be in hdfs. + Assert.assertFalse(dfs.exists( + new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath), + localDir.getName()))); // should not be in local fs. + } + + try (RawLocalFileSystem lfs = new RawLocalFileSystem()) { + lfs.initialize(localTragetPath.toUri(), conf); + Assert.assertFalse(lfs.exists( + new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath), + hdfsFile.getName()))); // should not be in hdfs. + Assert.assertTrue(lfs.exists( + new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath), + localDir.getName()))); // should be in local fs. + } + } + + /** + * Create mount links as follows. + * hdfs://localhost:xxx/HDFSUser --> nonexistent://NonExistent/User/ + * It should fail to add non existent fs link. + */ + @Test(timeout = 30000) + public void testMountLinkWithNonExistentLink() throws Exception { + testMountLinkWithNonExistentLink(true); + } + + public void testMountLinkWithNonExistentLink(boolean expectFsInitFailure) + throws Exception { + final String userFolder = "/User"; + final Path nonExistTargetPath = + new Path("nonexistent://NonExistent" + userFolder); + + /** + * Below addLink will create following mount points + * hdfs://localhost:xxx/User --> nonexistent://NonExistent/User/ + */ + addMountLinks(defaultFSURI.getAuthority(), new String[] {userFolder}, + new String[] {nonExistTargetPath.toUri().toString()}, conf); + if (expectFsInitFailure) { + LambdaTestUtils.intercept(IOException.class, () -> { + FileSystem.get(conf); + }); + } else { + try (FileSystem fs = FileSystem.get(conf)) { + Assert.assertEquals("hdfs", fs.getScheme()); + } + } + } + + /** + * Create mount links as follows. + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * ListStatus on / should list the mount links. 
+ */ + @Test(timeout = 30000) + public void testListStatusOnRootShouldListAllMountLinks() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(defaultFSURI, conf); + dfs.mkdirs(hdfsTargetPath); + } + + try (RawLocalFileSystem lfs = new RawLocalFileSystem()) { + lfs.initialize(localTargetDir.toURI(), conf); + lfs.mkdirs(new Path(localTargetDir.toURI())); + } + try (FileSystem fs = FileSystem.get(conf)) { + fs.mkdirs(hdfsTargetPath); + FileStatus[] ls = fs.listStatus(new Path("/")); + Assert.assertEquals(2, ls.length); + String lsPath1 = + Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).toString(); + String lsPath2 = + Path.getPathWithoutSchemeAndAuthority(ls[1].getPath()).toString(); + Assert.assertTrue( + HDFS_USER_FOLDER.equals(lsPath1) || LOCAL_FOLDER.equals(lsPath1)); + Assert.assertTrue( + HDFS_USER_FOLDER.equals(lsPath2) || LOCAL_FOLDER.equals(lsPath2)); + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * ListStatus non mount directory should fail. + */ + @Test(expected = IOException.class, timeout = 30000) + public void testListStatusOnNonMountedPath() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + + try (FileSystem fs = FileSystem.get(conf)) { + fs.listStatus(new Path("/nonMount")); + Assert.fail("It should fail as no mount link with /nonMount"); + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * Check that "viewfs:/" paths without authority can work when the + * default mount table name is set correctly. + */ + @Test + public void testAccessViewFsPathWithoutAuthority() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString() }, + conf); + + // /HDFSUser/test + Path hdfsDir = new Path(HDFS_USER_FOLDER, "test"); + // /local/test + Path localDir = new Path(LOCAL_FOLDER, "test"); + FileStatus[] expectedStatus; + + try (FileSystem fs = FileSystem.get(conf)) { + fs.mkdirs(hdfsDir); // /HDFSUser/test + fs.mkdirs(localDir); // /local/test + expectedStatus = fs.listStatus(new Path("/")); + } + + // check for viewfs path without authority + Path viewFsRootPath = new Path("viewfs:/"); + LambdaTestUtils.intercept(IOException.class, + "Empty Mount table in config for viewfs://default", () -> { + viewFsRootPath.getFileSystem(conf); + }); + + // set the name of the default mount table here and + // subsequent calls should succeed. 
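+    // Illustrative sketch (property name is whatever
+    // Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY resolves to, and
+    // the value is a placeholder); the equivalent client-side XML would look
+    // roughly like:
+    //   <property>
+    //     <name>fs.viewfs.mounttable.default.name.key</name>
+    //     <value>mycluster</value>
+    //   </property>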
+ conf.set(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, + defaultFSURI.getAuthority()); + + try (FileSystem fs = viewFsRootPath.getFileSystem(conf)) { + FileStatus[] status = fs.listStatus(viewFsRootPath); + // compare only the final components of the paths as + // full paths have different schemes (hdfs:/ vs. viewfs:/). + List expectedPaths = Arrays.stream(expectedStatus) + .map(s -> s.getPath().getName()).sorted() + .collect(Collectors.toList()); + List paths = Arrays.stream(status) + .map(s -> s.getPath().getName()).sorted() + .collect(Collectors.toList()); + assertEquals(expectedPaths, paths); + } + } + + /** + * Create mount links as follows hdfs://localhost:xxx/HDFSUser --> + * hdfs://localhost:xxx/HDFSUser/ hdfs://localhost:xxx/local --> + * file://TEST_ROOT_DIR/root/ fallback --> hdfs://localhost:xxx/HDFSUser/ + * Creating file or directory at non root level should succeed with fallback + * links. + */ + @Test(timeout = 30000) + public void testWithLinkFallBack() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, + Constants.CONFIG_VIEWFS_LINK_FALLBACK }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString(), + hdfsTargetPath.toUri().toString() }, + conf); + + try (FileSystem fs = FileSystem.get(conf)) { + fs.createNewFile(new Path("/nonMount/myfile")); + FileStatus[] ls = fs.listStatus(new Path("/nonMount")); + Assert.assertEquals(1, ls.length); + Assert.assertEquals( + Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).getName(), + "myfile"); + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * + * It cannot find any mount link. ViewFS expects a mount point from root. + */ + @Test(timeout = 30000) + public void testCreateOnRoot() throws Exception { + testCreateOnRoot(false); + } + + public void testCreateOnRoot(boolean fallbackExist) throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER}, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString()}, conf); + try (FileSystem fs = FileSystem.get(conf)) { + if (fallbackExist) { + Assert.assertTrue(fs.createNewFile(new Path("/newFileOnRoot"))); + } else { + LambdaTestUtils.intercept(NotInMountpointException.class, () -> { + fs.createNewFile(new Path("/newFileOnRoot")); + }); + } + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * fallback --> hdfs://localhost:xxx/HDFSUser/ + * + * Note: Above links created because to make fs initialization success. + * Otherwise will not proceed if no mount links. + * + * Unset fs.viewfs.overload.scheme.target.hdfs.impl property. + * So, OverloadScheme target fs initialization will fail. 
+ */ + @Test(expected = UnsupportedFileSystemException.class, timeout = 30000) + public void testInvalidOverloadSchemeTargetFS() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + String mountTableIfSet = conf.get(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH); + conf = new Configuration(); + if (mountTableIfSet != null) { + conf.set(Constants.CONFIG_VIEWFS_MOUNTTABLE_PATH, mountTableIfSet); + } + addMountLinks(defaultFSURI.getHost(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, + Constants.CONFIG_VIEWFS_LINK_FALLBACK }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString(), + hdfsTargetPath.toUri().toString() }, + conf); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, + defaultFSURI.toString()); + conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + conf.unset(String.format( + FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME)); + + try (FileSystem fs = FileSystem.get(conf)) { + fs.createNewFile(new Path("/onRootWhenFallBack")); + Assert.fail("OverloadScheme target fs should be valid."); + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/local --> file://TEST_ROOT_DIR/root/ + * + * It should be able to create file using ViewFileSystemOverloadScheme. + */ + @Test(timeout = 30000) + public void testViewFsOverloadSchemeWhenInnerCacheDisabled() + throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER }, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString(), }, + conf); + conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false); + try (FileSystem fs = FileSystem.get(conf)) { + Path testFile = new Path(HDFS_USER_FOLDER + "/testFile"); + fs.createNewFile(testFile); + Assert.assertTrue(fs.exists(testFile)); + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser0 --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/HDFSUser1 --> hdfs://localhost:xxx/HDFSUser/ + * + * 1. With cache, only one hdfs child file system instance should be there. + * 2. Without cache, there should 2 hdfs instances. + */ + @Test(timeout = 30000) + public void testViewFsOverloadSchemeWithInnerCache() + throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER + 0, HDFS_USER_FOLDER + 1 }, + new String[] {hdfsTargetPath.toUri().toString(), + hdfsTargetPath.toUri().toString() }, + conf); + + // 1. Only 1 hdfs child file system should be there with cache. + try (FileSystem vfs = FileSystem.get(conf)) { + Assert.assertEquals(1, vfs.getChildFileSystems().length); + } + + // 2. Two hdfs file systems should be there if no cache. + conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false); + try (FileSystem vfs = FileSystem.get(conf)) { + Assert.assertEquals(isFallBackExist(conf) ? 3 : 2, + vfs.getChildFileSystems().length); + } + } + + // HDFS-15529: if any extended tests added fallback, then getChildFileSystems + // will include fallback as well. + private boolean isFallBackExist(Configuration config) { + return config.get(ConfigUtil.getConfigViewFsPrefix(defaultFSURI + .getAuthority()) + "." 
+ Constants.CONFIG_VIEWFS_LINK_FALLBACK) != null; + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/HDFSUser0 --> hdfs://localhost:xxx/HDFSUser/ + * hdfs://localhost:xxx/HDFSUser1 --> hdfs://localhost:xxx/HDFSUser/ + * + * When InnerCache disabled, all matching ViewFileSystemOverloadScheme + * initialized scheme file systems would not use FileSystem cache. + */ + @Test(timeout = 3000) + public void testViewFsOverloadSchemeWithNoInnerCacheAndHdfsTargets() + throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {HDFS_USER_FOLDER + 0, HDFS_USER_FOLDER + 1 }, + new String[] {hdfsTargetPath.toUri().toString(), + hdfsTargetPath.toUri().toString() }, + conf); + + conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false); + // Two hdfs file systems should be there if no cache. + try (FileSystem vfs = FileSystem.get(conf)) { + Assert.assertEquals(isFallBackExist(conf) ? 3 : 2, + vfs.getChildFileSystems().length); + } + } + + /** + * Create mount links as follows + * hdfs://localhost:xxx/local0 --> file://localPath/ + * hdfs://localhost:xxx/local1 --> file://localPath/ + * + * When InnerCache disabled, all non matching ViewFileSystemOverloadScheme + * initialized scheme file systems should continue to take advantage of + * FileSystem cache. + */ + @Test(timeout = 3000) + public void testViewFsOverloadSchemeWithNoInnerCacheAndLocalSchemeTargets() + throws Exception { + final Path localTragetPath = new Path(localTargetDir.toURI()); + addMountLinks(defaultFSURI.getAuthority(), + new String[] {LOCAL_FOLDER + 0, LOCAL_FOLDER + 1 }, + new String[] {localTragetPath.toUri().toString(), + localTragetPath.toUri().toString() }, + conf); + + // Only one local file system should be there if no InnerCache, but fs + // cache should work. + conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false); + try (FileSystem vfs = FileSystem.get(conf)) { + Assert.assertEquals(isFallBackExist(conf) ? 2 : 1, + vfs.getChildFileSystems().length); + } + } + + /** + * Tests the rename with nfly mount link. + */ + @Test(timeout = 3000) + public void testNflyRename() throws Exception { + final Path hdfsTargetPath1 = new Path(defaultFSURI + HDFS_USER_FOLDER); + final Path hdfsTargetPath2 = new Path(defaultFSURI + HDFS_USER_FOLDER + 1); + final URI uri1 = hdfsTargetPath1.toUri(); + final URI uri2 = hdfsTargetPath2.toUri(); + final Path nflyRoot = new Path("/nflyroot"); + + final String nflyLinkKey = Constants.CONFIG_VIEWFS_LINK_NFLY + + ".minReplication=2." + nflyRoot.toString(); + addMountLinks(defaultFSURI.getAuthority(), new String[] {nflyLinkKey }, + new String[] {uri1.toString() + "," + uri2.toString() }, conf); + final FileSystem nfly = FileSystem.get(defaultFSURI, conf); + + final Path testDir = new Path("/nflyroot/testdir1/sub1/sub3"); + final Path testDirTmp = new Path("/nflyroot/testdir1/sub1/sub3_temp"); + assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir)); + + // Test renames + assertTrue(nfly.rename(testDir, testDirTmp)); + assertTrue(nfly.rename(testDirTmp, testDir)); + + final URI[] testUris = new URI[] {uri1, uri2 }; + for (final URI testUri : testUris) { + final FileSystem fs = FileSystem.get(testUri, conf); + assertTrue(testDir + " should exist!", fs.exists(testDir)); + } + } + + /** + * Tests the write and read contents with nfly mount link. 
+ */ + @Test(timeout = 3000) + public void testNflyWriteRead() throws Exception { + final Path hdfsTargetPath1 = new Path(defaultFSURI + HDFS_USER_FOLDER); + final Path hdfsTargetPath2 = new Path(defaultFSURI + HDFS_USER_FOLDER + 1); + final URI uri1 = hdfsTargetPath1.toUri(); + final URI uri2 = hdfsTargetPath2.toUri(); + final Path nflyRoot = new Path("/nflyroot"); + final String nflyLinkKey = Constants.CONFIG_VIEWFS_LINK_NFLY + + ".minReplication=2." + nflyRoot.toString(); + addMountLinks(defaultFSURI.getAuthority(), new String[] {nflyLinkKey }, + new String[] {uri1.toString() + "," + uri2.toString() }, conf); + final FileSystem nfly = FileSystem.get(defaultFSURI, conf); + final Path testFile = new Path("/nflyroot/test.txt"); + writeString(nfly, TEST_STRING, testFile); + final URI[] testUris = new URI[] {uri1, uri2 }; + for (final URI testUri : testUris) { + try (FileSystem fs = FileSystem.get(testUri, conf)) { + readString(fs, testFile, TEST_STRING, testUri); + } + } + } + + /** + * 1. Writes contents with nfly link having two target uris. 2. Deletes one + * target file. 3. Tests the read works with repairOnRead flag. 4. Tests that + * previously deleted file fully recovered and exists. + */ + @Test(timeout = 3000) + public void testNflyRepair() throws Exception { + final NflyFSystem.NflyKey repairKey = NflyFSystem.NflyKey.repairOnRead; + final Path hdfsTargetPath1 = new Path(defaultFSURI + HDFS_USER_FOLDER); + final Path hdfsTargetPath2 = new Path(defaultFSURI + HDFS_USER_FOLDER + 1); + final URI uri1 = hdfsTargetPath1.toUri(); + final URI uri2 = hdfsTargetPath2.toUri(); + final Path nflyRoot = new Path("/nflyroot"); + final String nflyLinkKey = Constants.CONFIG_VIEWFS_LINK_NFLY + + ".minReplication=2," + repairKey + "=true." + nflyRoot.toString(); + addMountLinks(defaultFSURI.getAuthority(), new String[] {nflyLinkKey }, + new String[] {uri1.toString() + "," + uri2.toString() }, conf); + try (FileSystem nfly = FileSystem.get(defaultFSURI, conf)) { + // write contents to nfly + final Path testFilePath = new Path("/nflyroot/test.txt"); + writeString(nfly, TEST_STRING, testFilePath); + + final URI[] testUris = new URI[] {uri1, uri2 }; + // both nodes are up again, test repair + FsGetter getter = new ViewFileSystemOverloadScheme.ChildFsGetter("hdfs"); + try (FileSystem fs1 = getter.getNewInstance(testUris[0], conf)) { + // Delete a file from one target URI + String testFile = "/test.txt"; + assertTrue( + fs1.delete(new Path(testUris[0].toString() + testFile), false)); + assertFalse(fs1.exists(new Path(testUris[0].toString() + testFile))); + + // Verify read success. + readString(nfly, testFilePath, TEST_STRING, testUris[0]); + // Verify file recovered. + assertTrue(fs1.exists(new Path(testUris[0].toString() + testFile))); + } + } + } + + /** + * Tests that the fs initialization should ignore the port number when it's + * extracting the mount table name from uri. 
+ */ + @Test(timeout = 30000) + public void testMountTableNameShouldIgnorePortFromURI() throws Exception { + final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER); + conf = new Configuration(getConf()); + addMountLinks(defaultFSURI.getHost(), + new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER, + Constants.CONFIG_VIEWFS_LINK_FALLBACK}, + new String[] {hdfsTargetPath.toUri().toString(), + localTargetDir.toURI().toString(), + hdfsTargetPath.toUri().toString()}, conf); + conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, + defaultFSURI.toString()); + conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME), + ViewFileSystemOverloadScheme.class.getName()); + conf.set(String + .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN, + HDFS_SCHEME), DistributedFileSystem.class.getName()); + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, true); + + Path testDirOnRoot = new Path("/test"); + URI uriWithoutPort = new URI("hdfs://" + defaultFSURI.getHost()); + //Initialize with out port + try (FileSystem fs = FileSystem + .get(uriWithoutPort, conf)) { + fs.mkdirs(testDirOnRoot); + fs.delete(testDirOnRoot, true); + } + + //Initialize with port + try (FileSystem fs = FileSystem.get(defaultFSURI, conf)) { + fs.mkdirs(testDirOnRoot); + fs.delete(testDirOnRoot, true); + } + } + + private void writeString(final FileSystem nfly, final String testString, + final Path testFile) throws IOException { + try (FSDataOutputStream fsDos = nfly.create(testFile)) { + fsDos.writeUTF(testString); + } + } + + private void readString(final FileSystem nfly, final Path testFile, + final String testString, final URI testUri) throws IOException { + try (FSDataInputStream fsDis = nfly.open(testFile)) { + assertEquals("Wrong file content", testString, fsDis.readUTF()); + } + } + + /** + * @return configuration. + */ + public Configuration getConf() { + return this.conf; + } + + /** + * @return configuration. + */ + public Configuration getNewConf() { + return new Configuration(cluster.getConfiguration(0)); + } + + /** + * sets configuration. 
+ */ + public void setConf(Configuration config) { + conf = config; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java index a3e5657086396..a3ccbdb847105 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs.viewfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java index 0b99cfea24a2a..2f44b46aa3056 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java @@ -20,7 +20,7 @@ import java.io.IOException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java new file mode 100644 index 0000000000000..09e02be640e5e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java @@ -0,0 +1,576 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.viewfs; + +import static org.apache.hadoop.fs.CreateFlag.CREATE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.EnumSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.AbstractFileSystem; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Test for viewfs with LinkFallback mount table entries. + */ +public class TestViewFsLinkFallback { + private static FileSystem fsDefault; + private FileSystem fsTarget; + private static MiniDFSCluster cluster; + private static URI viewFsDefaultClusterUri; + private Path targetTestRoot; + + @BeforeClass + public static void clusterSetupAtBeginning() + throws IOException, URISyntaxException { + int nameSpacesCount = 3; + int dataNodesCount = 3; + int fsIndexDefault = 0; + Configuration conf = new Configuration(); + FileSystem[] fsHdfs = new FileSystem[nameSpacesCount]; + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, + true); + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleFederatedTopology( + nameSpacesCount)) + .numDataNodes(dataNodesCount) + .build(); + cluster.waitClusterUp(); + + for (int i = 0; i < nameSpacesCount; i++) { + fsHdfs[i] = cluster.getFileSystem(i); + } + fsDefault = fsHdfs[fsIndexDefault]; + viewFsDefaultClusterUri = new URI(FsConstants.VIEWFS_SCHEME, + Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null); + + } + + @AfterClass + public static void clusterShutdownAtEnd() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Before + public void setUp() throws Exception { + fsTarget = fsDefault; + initializeTargetTestRoot(); + } + + private void initializeTargetTestRoot() throws IOException { + targetTestRoot = fsDefault.makeQualified(new Path("/")); + for (FileStatus status : fsDefault.listStatus(targetTestRoot)) { + fsDefault.delete(status.getPath(), true); + } + } + + /** + * Tests that directory making should be successful when the parent directory + * is same as the existent fallback directory. The new dir should be created + * in fallback instead failing. 
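 * <p>A minimal sketch of the configuration shape behind the ConfigUtil calls used
 * throughout this class, assuming the mount table name resolves to "default" and
 * with placeholder target URIs:
 * <pre>{@code
 * Configuration c = new Configuration();
 * // an ordinary mount link ...
 * c.set("fs.viewfs.mounttable.default.link./user1/hive/warehouse/partition-0",
 *     "hdfs://nn0/some/target");
 * // ... and the fallback that everything unmatched resolves against
 * c.set("fs.viewfs.mounttable.default.linkFallback", "hdfs://nn0/fallbackDir");
 * }</pre>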
+ */ + @Test + public void testMkdirOfLinkParentWithFallbackLinkWithSameMountDirectoryTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, + "fallbackDir/user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path p = new Path("/user1/hive/warehouse/test"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(p, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that directory making should be successful when attempting to create + * the root directory as it's already exist. + */ + @Test + public void testMkdirOfRootWithFallbackLinkAndMountWithSameDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil + .addLink(conf, "/user1", new Path(targetTestRoot.toString()).toUri()); + Path dir1 = new Path(targetTestRoot, "fallbackDir/user1"); + fsTarget.mkdirs(dir1); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path p = new Path("/"); + Path test = Path.mergePaths(fallbackTarget, p); + assertTrue(fsTarget.exists(test)); + vfs.mkdir(p, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory under the root. + */ + @Test + public void testMkdirOfNewDirWithOutMatchingToMountOrFallbackDirTree() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + // user2 does not exist in fallback + Path p = new Path("/user2"); + Path test = Path.mergePaths(fallbackTarget, p); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(p, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that when the parent dirs does not exist in fallback but the parent + * dir is same as mount internal directory, then we create parent structure + * (mount internal directory tree structure) in fallback. 
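 * <p>The assertions in this and the surrounding tests lean on a simple mapping,
 * sketched here with placeholder paths: Path.mergePaths appends the view path to
 * the fallback target, so the expected on-disk location is the fallback root plus
 * the same relative path.
 * <pre>{@code
 * Path fallback = new Path("hdfs://nn0/fallbackDir");   // placeholder target
 * Path viewPath = new Path("/user1/test/test");
 * Path expected = Path.mergePaths(fallback, viewPath);
 * // expected -> hdfs://nn0/fallbackDir/user1/test/test
 * }</pre>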
+ */ + @Test + public void testMkdirWithFallbackLinkWithMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + //user1 does not exist in fallback + Path immediateLevelToInternalDir = new Path("/user1/test"); + Path test = Path.mergePaths(fallbackTarget, immediateLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(immediateLevelToInternalDir, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that when the parent dirs does not exist in fallback but the + * immediate parent dir is not same as mount internal directory, then we + * create parent structure (mount internal directory tree structure) in + * fallback. + */ + @Test + public void testMkdirOfDeepTreeWithFallbackLinkAndMountPathMatchingDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + //user1 does not exist in fallback + Path multipleLevelToInternalDir = new Path("/user1/test/test"); + Path test = Path.mergePaths(fallbackTarget, multipleLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + vfs.mkdir(multipleLevelToInternalDir, null, true); + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that mkdir with createParent false should still create parent in + * fallback when the same mount dir exist. + */ + @Test + public void testMkdirShouldCreateParentDirInFallbackWhenMountDirExist() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/hive/test", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = AbstractFileSystem.get(viewFsDefaultClusterUri, + conf); + //user1/hive/test1 does not exist in fallback + Path multipleLevelToInternalDir = new Path("/user1/hive/test1"); + Path test = Path.mergePaths(fallbackTarget, multipleLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + // user1/hive exist in viewFS. + assertNotNull(vfs.getFileStatus(new Path("/user1/hive"))); + // user1/hive does not exists in fallback. + assertFalse(fsTarget.exists(test.getParent())); + + vfs.mkdir(multipleLevelToInternalDir, FsPermission.getDirDefault(), false); + assertTrue(fsTarget.exists(test)); + + } + + /** + * Tests that mkdir should fail with IOE when there is a problem with + * fallbackfs. 
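 * <p>Only as an illustrative alternative: the expectation exercised below with
 * try/catch could also be written with the LambdaTestUtils helper already
 * imported in this class (the test keeps try/catch so it can restart the
 * NameNodes and retry afterwards); vfs and the path are the test's own locals.
 * <pre>{@code
 * LambdaTestUtils.intercept(IOException.class,
 *     () -> vfs.mkdir(new Path("/user1/test1"),
 *         FsPermission.getDirDefault(), false));
 * }</pre>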
+ */ + @Test + public void testMkdirShouldFailWhenFallbackFSNotAvailable() + throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS, false); + ConfigUtil.addLink(conf, "/user1/test", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = AbstractFileSystem.get(viewFsDefaultClusterUri, + conf); + //user1/test1 does not exist in fallback + Path nextLevelToInternalDir = new Path("/user1/test1"); + Path test = Path.mergePaths(fallbackTarget, nextLevelToInternalDir); + assertFalse(fsTarget.exists(test)); + // user1 exists in viewFS mount. + assertNotNull(vfs.getFileStatus(new Path("/user1"))); + // user1 does not exists in fallback. + assertFalse(fsTarget.exists(test.getParent())); + cluster.shutdownNameNodes(); + try { + // /user1/test1 does not exist in mount internal dir tree, it would + // attempt to create in fallback. + vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(), + false); + Assert.fail("It should throw IOE when fallback fs not available."); + } catch (IOException e) { + cluster.restartNameNodes(); + // should succeed when fallback fs is back to normal. + vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(), + false); + } + assertTrue(fsTarget.exists(test)); + } + + /** + * Tests that the create file should be successful when the parent directory + * is same as the existent fallback directory. The new file should be created + * in fallback. + */ + @Test + public void testCreateFileOnInternalMountDirWithSameDirTreeExistInFallback() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path dir1 = new Path(fallbackTarget, "user1/hive/warehouse/partition-0"); + fsTarget.mkdirs(dir1); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/user1/hive/warehouse/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + assertTrue(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + assertTrue(fsTarget.exists(testFileInFallback)); + + } + + /** + * Tests the making of a new directory which is not matching to any of + * internal directory. 
+ */ + @Test + public void testCreateNewFileWithOutMatchingToMountDirOrFallbackDirPath() + throws Exception { + Configuration conf = new Configuration(); + ConfigUtil.addLink(conf, "/user1/hive/warehouse/partition-0", + new Path(targetTestRoot.toString()).toUri()); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/user2/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + // user2 does not exist in fallback + assertFalse(fsTarget.exists(testFileInFallback.getParent())); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault()), + Options.CreateOpts.createParent()).close(); + // /user2/test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + } + + /** + * Tests the making of a new file on root which is not matching to any of + * fallback files on root. + */ + @Test + public void testCreateFileOnRootWithFallbackEnabled() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/test.file"); + Path testFileInFallback = Path.mergePaths(fallbackTarget, vfsTestFile); + assertFalse(fsTarget.exists(testFileInFallback)); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + // /test.file should be created in fallback + assertTrue(fsTarget.exists(testFileInFallback)); + + } + + /** + * Tests the create of a file on root where the path is matching to an + * existing file on fallback's file on root. + */ + @Test (expected = FileAlreadyExistsException.class) + public void testCreateFileOnRootWithFallbackWithFileAlreadyExist() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + Path testFile = new Path(fallbackTarget, "test.file"); + // pre-creating test file in fallback. + fsTarget.create(testFile).close(); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestFile = new Path("/test.file"); + assertTrue(fsTarget.exists(testFile)); + vfs.create(vfsTestFile, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + } + + /** + * Tests the creating of a file where the path is same as mount link path. 
+ */ + @Test(expected= FileAlreadyExistsException.class) + public void testCreateFileWhereThePathIsSameAsItsMountLinkPath() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestDir = new Path("/user1/hive"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + vfs.create(vfsTestDir, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + } + + /** + * Tests the create of a file where the path is same as one of of the internal + * dir path should fail. + */ + @Test(expected = FileAlreadyExistsException.class) + public void testCreateFileSameAsInternalDirPath() + throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLink(conf, "/user1/hive/", + new Path(targetTestRoot.toString()).toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + AbstractFileSystem vfs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + Path vfsTestDir = new Path("/user1"); + assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir))); + vfs.create(vfsTestDir, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + } + + /** + * Tests that, when fallBack has files under matching internal dir, listFiles + * should work. + */ + @Test + public void testListFiles() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + Path fileInFallBackRoot = new Path(fallbackTarget, "GetFileBlockLocations"); + fsTarget.create(fileInFallBackRoot).close(); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + FileContext fc = FileContext.getFileContext(viewFsDefaultClusterUri, conf); + RemoteIterator iterator = + fc.util().listFiles(new Path("/"), false); + assertTrue(iterator.hasNext()); + assertEquals(fileInFallBackRoot.getName(), + iterator.next().getPath().getName()); + } + + @Test + public void testRenameOnInternalDirWithFallback() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLink(conf, "/user1", + new Path(targetTestRoot.toString() + "/user1").toUri()); + ConfigUtil.addLink(conf, "/NewHDFSUser/next", + new Path(targetTestRoot.toString() + "/newUser1").toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + //Make sure target fs has parent dir structures + try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(fsDefault.getUri(), conf); + dfs.mkdirs(new Path(targetTestRoot.toString() + "/user1")); + dfs.mkdirs(new Path(fallbackTarget.toString() + "/newUser1")); + } + + final AbstractFileSystem fs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + + Path src = new Path("/newFileOnRoot"); + Path dst = new Path("/newFileOnRoot1"); + fs.create(src, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + verifyRename(fs, src, dst); + + src = new Path("/newFileOnRoot1"); + dst = new 
Path("/newUser1/newFileOnRoot"); + fs.mkdir(dst.getParent(), FsPermission.getDefault(), true); + verifyRename(fs, src, dst); + + src = new Path("/newUser1/newFileOnRoot"); + dst = new Path("/newUser1/newFileOnRoot1"); + verifyRename(fs, src, dst); + + src = new Path("/newUser1/newFileOnRoot1"); + dst = new Path("/newFileOnRoot"); + verifyRename(fs, src, dst); + + src = new Path("/user1/newFileOnRoot1"); + dst = new Path("/user1/newFileOnRoot"); + fs.create(src, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + verifyRename(fs, src, dst); + } + + @Test + public void testRenameWhenDstOnInternalDirWithFallback() throws Exception { + Configuration conf = new Configuration(); + Path fallbackTarget = new Path(targetTestRoot, "fallbackDir"); + fsTarget.mkdirs(fallbackTarget); + ConfigUtil.addLink(conf, "/InternalDirDoesNotExistInFallback/test", + new Path(targetTestRoot.toString() + "/user1").toUri()); + ConfigUtil.addLink(conf, "/NewHDFSUser/next/next1", + new Path(targetTestRoot.toString() + "/newUser1").toUri()); + ConfigUtil.addLinkFallback(conf, fallbackTarget.toUri()); + + try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(fsDefault.getUri(), conf); + dfs.mkdirs(new Path(targetTestRoot.toString() + "/newUser1")); + dfs.mkdirs( + new Path(fallbackTarget.toString() + "/NewHDFSUser/next/next1")); + } + + final AbstractFileSystem fs = + AbstractFileSystem.get(viewFsDefaultClusterUri, conf); + final Path src = new Path("/newFileOnRoot"); + final Path dst = new Path("/NewHDFSUser/next"); + fs.mkdir(src, FsPermission.getDefault(), true); + // src and dst types are must be either same dir or files + LambdaTestUtils.intercept(IOException.class, + () -> fs.rename(src, dst, Options.Rename.OVERWRITE)); + + final Path src1 = new Path("/newFileOnRoot1"); + final Path dst1 = new Path("/NewHDFSUser/next/file"); + fs.create(src1, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + verifyRename(fs, src1, dst1); + + final Path src2 = new Path("/newFileOnRoot2"); + final Path dst2 = new Path("/InternalDirDoesNotExistInFallback/file"); + fs.create(src2, EnumSet.of(CREATE), + Options.CreateOpts.perms(FsPermission.getDefault())).close(); + // If fallback does not have same structure as internal, rename will fail. 
+ LambdaTestUtils.intercept(FileNotFoundException.class, + () -> fs.rename(src2, dst2, Options.Rename.OVERWRITE)); + } + + private void verifyRename(AbstractFileSystem fs, Path src, Path dst) + throws Exception { + fs.rename(src, dst, Options.Rename.OVERWRITE); + LambdaTestUtils + .intercept(FileNotFoundException.class, () -> fs.getFileStatus(src)); + Assert.assertNotNull(fs.getFileStatus(dst)); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java index e7e664857c851..694339dc81324 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs.viewfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestHelper; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java index 1bc6b2c351ffb..20ec58bc75b1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java @@ -28,7 +28,7 @@ import java.util.Map; import java.util.Random; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 0c7b472638fc8..02dcb6d49a7da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -72,13 +72,13 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; -import com.google.common.base.Charsets; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdfs.tools.DFSck; @@ -200,7 +200,7 @@ import org.junit.Assume; import org.apache.hadoop.util.ToolRunner; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** Utilities for HDFS tests */ public class DFSTestUtil { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java index 20ddfd1865286..ba5a451d8c2f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 94aae539850ae..ba26d81264b01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; @@ -75,11 +76,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.Multimap; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap; import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.InMemoryLevelDBAliasMapClient; +import org.apache.hadoop.hdfs.server.datanode.VolumeScanner; import org.apache.hadoop.hdfs.server.namenode.ImageServlet; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; @@ -142,10 +144,10 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * This class creates a single-process DFS cluster for junit testing. 
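// A sketch of how a test could override the volume-scanner join timeout that the
// Builder now defaults (see the hunk below): the key is only pinned when it is
// unset, so a value set before construction wins. Constant names follow the
// patch; the node count and 5 second value here are arbitrary, and the usual
// Configuration/TimeUnit imports are assumed.
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
    TimeUnit.SECONDS.toMillis(5));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();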
@@ -171,6 +173,13 @@ public class MiniDFSCluster implements AutoCloseable { = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing"; public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY = DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY + ".testing"; + /** + * For the Junit tests, this is the default value of the The amount of time + * in milliseconds that the BlockScanner times out waiting for the + * {@link VolumeScanner} thread to join during a shutdown call. + */ + public static final long DEFAULT_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC = + TimeUnit.SECONDS.toMillis(30); // Changing this default may break some tests that assume it is 2. private static final int DEFAULT_STORAGES_PER_DATANODE = 2; @@ -217,8 +226,7 @@ public static class Builder { public Builder(Configuration conf) { this.conf = conf; - this.storagesPerDatanode = - FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs(); + initDefaultConfigurations(); if (null == conf.get(HDFS_MINIDFS_BASEDIR)) { conf.set(HDFS_MINIDFS_BASEDIR, new File(getBaseDirectory()).getAbsolutePath()); @@ -227,8 +235,7 @@ public Builder(Configuration conf) { public Builder(Configuration conf, File basedir) { this.conf = conf; - this.storagesPerDatanode = - FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs(); + initDefaultConfigurations(); if (null == basedir) { throw new IllegalArgumentException( "MiniDFSCluster base directory cannot be null"); @@ -492,6 +499,19 @@ public Builder useConfiguredTopologyMappingClass( public MiniDFSCluster build() throws IOException { return new MiniDFSCluster(this); } + + /** + * Initializes default values for the cluster. + */ + private void initDefaultConfigurations() { + long defaultScannerVolumeTimeOut = + conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY, + DEFAULT_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC); + conf.setLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY, + defaultScannerVolumeTimeOut); + this.storagesPerDatanode = + FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs(); + } } /** @@ -2679,7 +2699,8 @@ public void transitionToObserver(int nnIndex) throws IOException, public void rollEditLogAndTail(int nnIndex) throws Exception { getNameNode(nnIndex).getRpcServer().rollEditLog(); for (int i = 2; i < getNumNameNodes(); i++) { - getNameNode(i).getNamesystem().getEditLogTailer().doTailEdits(); + long el = getNameNode(i).getNamesystem().getEditLogTailer().doTailEdits(); + LOG.info("editsLoaded {}", el); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java index 390c61c5e8b72..1d7f38dae4412 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java @@ -22,8 +22,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This class is used to specify the setup of namenodes when instantiating diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java index 35edab93ccc68..6578ad0fbc8b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java index def2ab788f9fa..0ef3b75adceef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java @@ -51,7 +51,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Test randomly mixing append, snapshot and truncate operations. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index 24d153001e56d..3191fbdf8fe1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -26,7 +26,7 @@ import java.util.ArrayList; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java index 081a0c5e91689..54dd33bbc919c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonPathCapabilities; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 76eb824ef0837..c36315be1e945 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -24,8 +24,8 @@ import java.io.IOException; import java.util.*; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import 
org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockStoragePolicySpi; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index 465a08384103d..873af8b505055 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -27,7 +27,7 @@ import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -245,6 +245,51 @@ public boolean dropHeartbeatPacket() { } } + /** + * Test to ensure heartbeats continue during a flush in case of + * delayed acks. + */ + @Test + public void testHeartbeatDuringFlush() throws Exception { + // Delay sending acks + DataNodeFaultInjector dnFaultInjector = new DataNodeFaultInjector() { + @Override + public void delaySendingAckToUpstream(final String upstreamAddr) + throws IOException { + try { + Thread.sleep(3500); // delay longer than socket timeout + } catch (InterruptedException ie) { + throw new IOException("Interrupted while sleeping"); + } + } + }; + DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get(); + + // Setting the timeout to be 3 seconds. Heartbeat packet + // should be sent every 1.5 seconds if there is no data traffic. + Configuration conf = new HdfsConfiguration(); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, "3000"); + MiniDFSCluster cluster = null; + + try { + int numDataNodes = 1; + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(numDataNodes).build(); + cluster.waitActive(); + FileSystem fs = cluster.getFileSystem(); + FSDataOutputStream out = fs.create(new Path("delayedack.dat"), (short)2); + out.write(0x31); + out.hflush(); + DataNodeFaultInjector.set(dnFaultInjector); // cause ack delay + out.close(); + } finally { + DataNodeFaultInjector.set(oldDnInjector); + if (cluster != null) { + cluster.shutdown(); + } + } + } + /** * Test recovery on restart OOB message. It also tests the delivery of * OOB ack originating from the primary datanode. 
Since there is only diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java index 917f0dbe09395..df6a7dc814558 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java @@ -173,7 +173,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception { final DataNode dn = cluster.getDataNodes().get(dnIdx); final String bpid = cluster.getNamesystem().getBlockPoolId(); List replicas = - dn.getFSDataset().getFinalizedBlocks(bpid); + dn.getFSDataset().getSortedFinalizedBlocks(bpid); assertTrue("Replicas do not exist", !replicas.isEmpty()); for (int idx = 0; idx < replicas.size(); idx++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index e560522cd9321..f992a26685ea7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -839,14 +839,18 @@ class Counter { public void testGetFileChecksum() throws Exception { final String f = "/testGetFileChecksum"; final Path p = new Path(f); - - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + // HDFS-15461: the number of datanode is higher than the number of replicas. + // That way when a DN fails, the pipeline can recover. + final int numReplicas = 3; + final int numDatanodes = numReplicas + 1; + final MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); try { cluster.waitActive(); - //create a file + // create a file final FileSystem fs = cluster.getFileSystem(); - DFSTestUtil.createFile(fs, p, 1L << 20, (short)3, 20100402L); + DFSTestUtil.createFile(fs, p, 1L << 20, (short) numReplicas, 20100402L); //get checksum final FileChecksum cs1 = fs.getFileChecksum(p); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index bf9e4a0811f18..a9d97cfc297f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -35,7 +35,7 @@ import org.junit.After; import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This test ensures the appropriate response from the system when diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java index 0d322daebb682..f2d580576cfa3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java @@ -17,11 +17,14 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_READ_USE_CACHE_PRIORITY; import static org.junit.Assert.assertEquals; import 
static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.hamcrest.CoreMatchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.File; import java.io.IOException; @@ -32,8 +35,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; @@ -219,4 +226,65 @@ public void testNullCheckSumWhenDNRestarted() cluster.shutdown(); } } + + @Test + public void testReadWithPreferredCachingReplica() throws IOException { + Configuration conf = new Configuration(); + conf.setBoolean(DFS_CLIENT_READ_USE_CACHE_PRIORITY, true); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + DistributedFileSystem fs = null; + Path filePath = new Path("/testReadPreferredCachingReplica"); + try { + fs = cluster.getFileSystem(); + FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512); + DFSInputStream dfsInputStream = + (DFSInputStream) fs.open(filePath).getWrappedStream(); + LocatedBlock lb = mock(LocatedBlock.class); + when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]); + DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111, + 1112, 1113, 1114); + DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); + when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[] {dnInfo}); + DatanodeInfo retDNInfo = + dfsInputStream.getBestNodeDNAddrPair(lb, null).info; + assertEquals(dnInfo, retDNInfo); + } finally { + fs.delete(filePath, true); + cluster.shutdown(); + } + } + + @Test + public void testReadWithoutPreferredCachingReplica() throws IOException { + Configuration conf = new Configuration(); + conf.setBoolean(DFS_CLIENT_READ_USE_CACHE_PRIORITY, false); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + DistributedFileSystem fs = null; + Path filePath = new Path("/testReadWithoutPreferredCachingReplica"); + try { + fs = cluster.getFileSystem(); + FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512); + DFSInputStream dfsInputStream = + (DFSInputStream) fs.open(filePath).getWrappedStream(); + LocatedBlock lb = mock(LocatedBlock.class); + when(lb.getCachedLocations()).thenReturn(new DatanodeInfo[0]); + DatanodeID nodeId = new DatanodeID("localhost", "localhost", "dn0", 1111, + 1112, 1113, 1114); + DatanodeInfo dnInfo = new DatanodeDescriptor(nodeId); + DatanodeInfoWithStorage dnInfoStorage = + new DatanodeInfoWithStorage(dnInfo, "DISK", StorageType.DISK); + when(lb.getLocations()).thenReturn( + new DatanodeInfoWithStorage[] {dnInfoStorage}); + DatanodeInfo retDNInfo = + dfsInputStream.getBestNodeDNAddrPair(lb, null).info; + assertEquals(dnInfo, retDNInfo); + } finally { + fs.delete(filePath, true); + cluster.shutdown(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index e7002c301c4a8..fe2eee28b751e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -30,7 +30,9 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; public class TestDFSRename { @@ -175,4 +177,23 @@ public void testRenameWithOverwrite() throws Exception { } } } + + @Test + public void testRename2Options() throws Exception { + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder( + new HdfsConfiguration()).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + Path path = new Path("/test"); + dfs.mkdirs(path); + GenericTestUtils.LogCapturer auditLog = + GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); + dfs.rename(path, new Path("/dir1"), + new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH}); + String auditOut = auditLog.getOutput(); + assertTrue("Rename should have both OVERWRITE and TO_TRASH " + + "flags at namenode but had only " + auditOut, + auditOut.contains("options=[OVERWRITE, TO_TRASH]")); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index b2da68ad57036..cc158877a3834 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -40,8 +40,8 @@ import org.junit.After; import org.junit.Test; -import com.google.common.base.Charsets; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This test ensures the appropriate response (successful or failure) from diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 65032514b4e5f..139c249f7767c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -34,8 +34,8 @@ import java.util.zip.DeflaterOutputStream; import java.util.zip.GZIPOutputStream; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.RandomStringUtils; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java new file mode 100644 index 0000000000000..ae29da08d41d2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java @@ -0,0 
+1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.junit.Test; + + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; + +public class TestDFSStripedOutputStreamUpdatePipeline { + + @Test + public void testDFSStripedOutputStreamUpdatePipeline() throws Exception { + + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFS_BLOCK_SIZE_KEY, 1 * 1024 * 1024); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(5).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + // Create a file with EC policy + Path dir = new Path("/test"); + dfs.mkdirs(dir); + dfs.enableErasureCodingPolicy("RS-3-2-1024k"); + dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k"); + Path filePath = new Path("/test/file"); + FSDataOutputStream out = dfs.create(filePath); + try { + for (int i = 0; i < Long.MAX_VALUE; i++) { + out.write(i); + if (i == 1024 * 1024 * 5) { + cluster.stopDataNode(0); + cluster.stopDataNode(1); + cluster.stopDataNode(2); + } + } + } catch(Exception e) { + dfs.delete(filePath, true); + } finally { + // The close should be success, shouldn't get stuck. + IOUtils.closeStream(out); + } + } + } + + /** + * Test writing ec file hang when applying the second block group occurs + * an addBlock exception (e.g. quota exception). + */ + @Test(timeout = 90000) + public void testECWriteHangWhenAddBlockWithException() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1 * 1024 * 1024); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(3).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + // Create a file with EC policy + Path dir = new Path("/test"); + dfs.mkdirs(dir); + dfs.enableErasureCodingPolicy("XOR-2-1-1024k"); + dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k"); + Path filePath = new Path("/test/file"); + FSDataOutputStream out = dfs.create(filePath); + for (int i = 0; i < 1024 * 1024 * 2; i++) { + out.write(i); + } + dfs.setQuota(dir, 5, 0); + try { + for (int i = 0; i < 1024 * 1024 * 2; i++) { + out.write(i); + } + } catch (Exception e) { + dfs.delete(filePath, true); + } finally { + // The close should be success, shouldn't get stuck. 
+ IOUtils.closeStream(out); + } + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java index 769000e50a0e1..bbe991dacc781 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index 1c33cc4b5fb30..e8bd5cefc7f13 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -49,8 +49,8 @@ import org.junit.Ignore; import org.junit.Test; -import com.google.common.base.Charsets; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * This test ensures the appropriate response (successful or failure) from diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index ff05c9d81d82f..f1e69d0cf4af8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -84,7 +84,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestDFSUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java index 9e3ddcfbc9ac4..0bf21ee1d6219 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -44,7 +44,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; public class TestDataTransferKeepalive { final Configuration conf = new HdfsConfiguration(); @@ -253,12 +253,11 @@ private void assertXceiverCount(int expected) { } /** - * Returns the datanode's xceiver count, but subtracts 1, since the - * DataXceiverServer counts as one. + * Returns the datanode's active xceiver count. * - * @return int xceiver count, not including DataXceiverServer + * @return the datanode's active xceivers count. 
*/ private int getXceiverCountWithoutServer() { - return dn.getXceiverCount() - 1; + return dn.getXceiverCount(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 77aeff4d2cb6b..4436fd3cac9c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -41,7 +41,7 @@ import org.apache.hadoop.util.VersionInfo; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.net.InetSocketAddress; import java.security.Permission; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index b4d6fc9950392..69dbf6438af93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; @@ -143,7 +143,7 @@ public void testDatanodeReport() throws Exception { assertReports(1, DatanodeReportType.DEAD, client, datanodes, null); Thread.sleep(5000); - assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem")); + assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem")); } finally { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index a571e46ce8e8b..e8da918e1f7cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Supplier; +import java.net.URI; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -29,20 +30,26 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import java.io.IOException; +import java.util.Collection; import java.util.Queue; import java.util.concurrent.LinkedBlockingQueue; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_ENABLED_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_DEAD_NODE_INTERVAL_MS_KEY; import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_INTERVAL_MS_KEY; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; /** * Tests for dead node detection in DFSClient. @@ -67,6 +74,7 @@ public void setUp() { DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_CONNECTION_TIMEOUT_MS_KEY, 1000); conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 0); + conf.setLong(DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY, 100); } @After @@ -241,42 +249,63 @@ public void testDeadNodeDetectionDeadNodeRecovery() throws Exception { } @Test - public void testDeadNodeDetectionMaxDeadNodesProbeQueue() throws Exception { - conf.setInt(DFS_CLIENT_DEAD_NODE_DETECTION_DEAD_NODE_QUEUE_MAX_KEY, 1); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); - cluster.waitActive(); - - FileSystem fs = cluster.getFileSystem(); - Path filePath = new Path("/testDeadNodeDetectionMaxDeadNodesProbeQueue"); - createFile(fs, filePath); - - // Remove three DNs, - cluster.stopDataNode(0); - cluster.stopDataNode(0); - cluster.stopDataNode(0); - - FSDataInputStream in = fs.open(filePath); - DFSInputStream din = (DFSInputStream) in.getWrappedStream(); - DFSClient dfsClient = din.getDFSClient(); + public void testDeadNodeDetectionDeadNodeProbe() throws Exception { + FileSystem fs = null; + FSDataInputStream in = null; + Path filePath = new Path("/" + GenericTestUtils.getMethodName()); try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + + fs = cluster.getFileSystem(); + createFile(fs, filePath); + + // Remove three DNs, + cluster.stopDataNode(0); + cluster.stopDataNode(0); + cluster.stopDataNode(0); + + in = fs.open(filePath); + DFSInputStream din = (DFSInputStream) in.getWrappedStream(); + DFSClient dfsClient = din.getDFSClient(); + DeadNodeDetector deadNodeDetector = + dfsClient.getClientContext().getDeadNodeDetector(); + // Spy suspect queue and dead queue. + DeadNodeDetector.UniqueQueue queue = + deadNodeDetector.getSuspectNodesProbeQueue(); + DeadNodeDetector.UniqueQueue suspectSpy = + Mockito.spy(queue); + deadNodeDetector.setSuspectQueue(suspectSpy); + queue = deadNodeDetector.getDeadNodesProbeQueue(); + DeadNodeDetector.UniqueQueue deadSpy = Mockito.spy(queue); + deadNodeDetector.setDeadQueue(deadSpy); + // Trigger dead node detection. try { in.read(); } catch (BlockMissingException e) { } Thread.sleep(1500); - Assert.assertTrue((dfsClient.getClientContext().getDeadNodeDetector() - .getDeadNodesProbeQueue().size() - + dfsClient.getDeadNodes(din).size()) <= 4); + Collection deadNodes = + dfsClient.getDeadNodeDetector().clearAndGetDetectedDeadNodes(); + assertEquals(3, deadNodes.size()); + for (DatanodeInfo dead : deadNodes) { + // Each node is suspected once then marked as dead. + Mockito.verify(suspectSpy, Mockito.times(1)).offer(dead); + // All the dead nodes should be scheduled and probed at least once. 
+ Mockito.verify(deadSpy, Mockito.atLeastOnce()).offer(dead); + Mockito.verify(deadSpy, Mockito.atLeastOnce()).poll(); + } } finally { - in.close(); + if (in != null) { + in.close(); + } deleteFile(fs, filePath); } } @Test public void testDeadNodeDetectionSuspectNode() throws Exception { - conf.setInt(DFS_CLIENT_DEAD_NODE_DETECTION_SUSPECT_NODE_QUEUE_MAX_KEY, 1); DeadNodeDetector.setDisabledProbeThreadForTest(true); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); @@ -320,6 +349,49 @@ public void testDeadNodeDetectionSuspectNode() throws Exception { } } + @Test + public void testCloseDeadNodeDetector() throws Exception { + DistributedFileSystem dfs0 = (DistributedFileSystem) FileSystem + .newInstance(new URI("hdfs://127.0.0.1:2001/"), conf); + DistributedFileSystem dfs1 = (DistributedFileSystem) FileSystem + .newInstance(new URI("hdfs://127.0.0.1:2001/"), conf); + // The DeadNodeDetector is shared by different DFSClients. + DeadNodeDetector detector = dfs0.getClient().getDeadNodeDetector(); + assertNotNull(detector); + assertSame(detector, dfs1.getClient().getDeadNodeDetector()); + // Close one client. The dead node detector should be alive. + dfs0.close(); + detector = dfs0.getClient().getDeadNodeDetector(); + assertNotNull(detector); + assertSame(detector, dfs1.getClient().getDeadNodeDetector()); + assertTrue(detector.isAlive()); + // Close all clients. The dead node detector should be closed. + dfs1.close(); + detector = dfs0.getClient().getDeadNodeDetector(); + assertNull(detector); + assertSame(detector, dfs1.getClient().getDeadNodeDetector()); + // Create a new client. The dead node detector should be alive. + dfs1 = (DistributedFileSystem) FileSystem + .newInstance(new URI("hdfs://127.0.0.1:2001/"), conf); + DeadNodeDetector newDetector = dfs0.getClient().getDeadNodeDetector(); + assertNotNull(newDetector); + assertTrue(newDetector.isAlive()); + assertNotSame(detector, newDetector); + dfs1.close(); + } + + @Test + public void testDeadNodeDetectorThreadsShutdown() throws Exception { + DistributedFileSystem dfs = (DistributedFileSystem) FileSystem + .newInstance(new URI("hdfs://127.0.0.1:2001/"), conf); + DeadNodeDetector detector = dfs.getClient().getDeadNodeDetector(); + assertNotNull(detector); + dfs.close(); + assertTrue(detector.isThreadsShutdown()); + detector = dfs.getClient().getDeadNodeDetector(); + assertNull(detector); + } + private void createFile(FileSystem fs, Path filePath) throws IOException { FSDataOutputStream out = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index d23191eadef01..04731918f0fc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -38,8 +38,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.text.TextStringBuilder; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 94830d753fe78..3c903563f4d23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -65,6 +65,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; @@ -75,6 +76,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.PathOperationException; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.StorageStatistics.LongStatistic; import org.apache.hadoop.fs.StorageType; @@ -139,7 +141,7 @@ public class TestDistributedFileSystem { private boolean noXmlDefaults = false; - private HdfsConfiguration getTestConfiguration() { + HdfsConfiguration getTestConfiguration() { HdfsConfiguration conf; if (noXmlDefaults) { conf = new HdfsConfiguration(false); @@ -810,7 +812,7 @@ public void testStatistics() throws IOException { @Test public void testStatistics2() throws IOException, NoSuchAlgorithmException { - HdfsConfiguration conf = new HdfsConfiguration(); + HdfsConfiguration conf = getTestConfiguration(); conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.EXTERNAL.toString()); File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString()); @@ -1457,7 +1459,7 @@ public void testCreateWithCustomChecksum() throws Exception { @Test(timeout=60000) public void testFileCloseStatus() throws IOException { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); DistributedFileSystem fs = cluster.getFileSystem(); try { @@ -1477,7 +1479,7 @@ public void testFileCloseStatus() throws IOException { @Test public void testCreateWithStoragePolicy() throws Throwable { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .storageTypes( new StorageType[] {StorageType.DISK, StorageType.ARCHIVE, @@ -1516,7 +1518,7 @@ public void testCreateWithStoragePolicy() throws Throwable { @Test(timeout=60000) public void testListFiles() throws IOException { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { @@ -1539,7 +1541,7 @@ public void testListFiles() throws IOException { @Test public void testListStatusOfSnapshotDirs() throws IOException { - MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) + MiniDFSCluster cluster = new MiniDFSCluster.Builder(getTestConfiguration()) .build(); try { DistributedFileSystem dfs = cluster.getFileSystem(); @@ -1559,7 +1561,7 @@ public void testListStatusOfSnapshotDirs() throws IOException { @Test(timeout=10000) public void testDFSClientPeerReadTimeout() throws IOException { final int timeout = 1000; - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getTestConfiguration(); 
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer @@ -1593,7 +1595,7 @@ public void testDFSClientPeerReadTimeout() throws IOException { @Test(timeout=60000) public void testGetServerDefaults() throws IOException { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); @@ -1608,7 +1610,7 @@ public void testGetServerDefaults() throws IOException { @Test(timeout=10000) public void testDFSClientPeerWriteTimeout() throws IOException { final int timeout = 1000; - final Configuration conf = new HdfsConfiguration(); + final Configuration conf = getTestConfiguration(); conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer @@ -1645,7 +1647,7 @@ public void testDFSClientPeerWriteTimeout() throws IOException { @Test(timeout = 30000) public void testTotalDfsUsed() throws Exception { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); @@ -1832,7 +1834,7 @@ public void testDFSDataOutputStreamBuilderForAppend() throws IOException { @Test public void testSuperUserPrivilege() throws Exception { - HdfsConfiguration conf = new HdfsConfiguration(); + HdfsConfiguration conf = getTestConfiguration(); File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString()); final Path jksPath = new Path(tmpDir.toString(), "test.jks"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, @@ -1885,7 +1887,7 @@ public Void run() throws Exception { @Test public void testListingStoragePolicyNonSuperUser() throws Exception { - HdfsConfiguration conf = new HdfsConfiguration(); + HdfsConfiguration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); @@ -2037,7 +2039,7 @@ public Object run() throws Exception { @Test public void testStorageFavouredNodes() throws IOException, InterruptedException, TimeoutException { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .storageTypes(new StorageType[] {StorageType.SSD, StorageType.DISK}) .numDataNodes(3).storagesPerDatanode(2).build()) { @@ -2062,7 +2064,7 @@ public void testStorageFavouredNodes() @Test public void testGetECTopologyResultForPolicies() throws Exception { - Configuration conf = new HdfsConfiguration(); + Configuration conf = getTestConfiguration(); try (MiniDFSCluster cluster = DFSTestUtil.setupCluster(conf, 9, 3, 0)) { DistributedFileSystem dfs = cluster.getFileSystem(); dfs.enableErasureCodingPolicy("RS-6-3-1024k"); @@ -2090,4 +2092,18 @@ public void testGetECTopologyResultForPolicies() throws Exception { assertFalse(result.isSupported()); } } + + @Test + public void testCopyBetweenFsEqualPath() throws Exception { + Configuration conf = getTestConfiguration(); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + Path filePath = new Path("/dir/file"); + dfs.create(filePath).close(); + FileStatus fstatus = dfs.getFileStatus(filePath); + 
LambdaTestUtils.intercept(PathOperationException.class, + () -> FileUtil.copy(dfs, fstatus, dfs, filePath, false, true, conf)); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java index 59230edeb8d2e..a23c4bf412698 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java @@ -32,7 +32,7 @@ import java.util.List; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 451fe9c883571..2bd2324491b7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -43,7 +43,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java index 1b8724810234c..f65b6b13dc554 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java @@ -19,7 +19,7 @@ import static org.junit.Assert.assertTrue; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.KMSDelegationToken; import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java index de59a1d71e613..622980114a0a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.fs.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java index b983a8eaee82f..1b8dfa81946ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java index 85e25dd687ab9..f153b2c9d1724 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java index 0ff2d4b3cdaf7..83ac946ac7dee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; @@ -43,6 +44,8 @@ import java.util.Random; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; /** * This test serves a prototype to demo the idea proposed so far. It creates two @@ -534,6 +537,37 @@ public void testStripedFileChecksumWithMissedDataBlocksRangeQuery20() bytesPerCRC - 1); } + @Test(timeout = 90000) + public void testStripedFileChecksumWithReconstructFail() + throws Exception { + String stripedFile4 = ecDir + "/stripedFileChecksum4"; + prepareTestFiles(fileSize, new String[] {stripedFile4}); + + // get checksum + FileChecksum fileChecksum = getFileChecksum(stripedFile4, -1, false); + + DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get(); + DataNodeFaultInjector newInjector = mock(DataNodeFaultInjector.class); + doThrow(new IOException()) + .doNothing() + .when(newInjector) + .stripedBlockChecksumReconstruction(); + DataNodeFaultInjector.set(newInjector); + + try { + // Get checksum again with reconstruction. + // If the reconstruction task fails, a client try to get checksum from + // another DN which has a block of the block group because of a failure of + // getting result. 
+ FileChecksum fileChecksum1 = getFileChecksum(stripedFile4, -1, true); + + Assert.assertEquals("checksum should be same", fileChecksum, + fileChecksum1); + } finally { + DataNodeFaultInjector.set(oldInjector); + } + } + @Test(timeout = 90000) public void testMixedBytesPerChecksum() throws Exception { int fileLength = bytesPerCRC * 3; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index 3e9d8121aca86..3665fef3ff861 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java index e3a1763811cee..e82b990a4e826 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java @@ -19,8 +19,8 @@ import static org.junit.Assert.*; -import java.io.IOException; import java.net.InetSocketAddress; +import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -28,37 +28,50 @@ import java.util.Random; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.test.LambdaTestUtils; + import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class tests if 
getblocks request works correctly. */ public class TestGetBlocks { - private static final int blockSize = 8192; - private static final String racks[] = new String[] { "/d1/r1", "/d1/r1", - "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" }; - private static final int numDatanodes = racks.length; + private static final Logger LOG = + LoggerFactory.getLogger(TestBlockManager.class); + + private static final int BLOCK_SIZE = 8192; + private static final String[] RACKS = new String[]{"/d1/r1", "/d1/r1", + "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3"}; + private static final int NUM_DATA_NODES = RACKS.length; /** * Stop the heartbeat of a datanode in the MiniDFSCluster @@ -96,7 +109,7 @@ public void testReadSelectNonStaleDatanode() throws Exception { conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, staleInterval); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(numDatanodes).racks(racks).build(); + .numDataNodes(NUM_DATA_NODES).racks(RACKS).build(); cluster.waitActive(); InetSocketAddress addr = new InetSocketAddress("localhost", @@ -105,7 +118,7 @@ public void testReadSelectNonStaleDatanode() throws Exception { List nodeInfoList = cluster.getNameNode() .getNamesystem().getBlockManager().getDatanodeManager() .getDatanodeListForReport(DatanodeReportType.LIVE); - assertEquals("Unexpected number of datanodes", numDatanodes, + assertEquals("Unexpected number of datanodes", NUM_DATA_NODES, nodeInfoList.size()); FileSystem fileSys = cluster.getFileSystem(); FSDataOutputStream stm = null; @@ -116,14 +129,14 @@ public void testReadSelectNonStaleDatanode() throws Exception { stm = fileSys.create(fileName, true, fileSys.getConf().getInt( CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short) 3, blockSize); - stm.write(new byte[(blockSize * 3) / 2]); + (short) 3, BLOCK_SIZE); + stm.write(new byte[(BLOCK_SIZE * 3) / 2]); // We do not close the stream so that // the writing seems to be still ongoing stm.hflush(); LocatedBlocks blocks = client.getNamenode().getBlockLocations( - fileName.toString(), 0, blockSize); + fileName.toString(), 0, BLOCK_SIZE); DatanodeInfo[] nodes = blocks.get(0).getLocations(); assertEquals(nodes.length, 3); DataNode staleNode = null; @@ -139,7 +152,7 @@ public void testReadSelectNonStaleDatanode() throws Exception { -(staleInterval + 1)); LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations( - fileName.toString(), 0, blockSize); + fileName.toString(), 0, BLOCK_SIZE); DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations(); assertEquals(nodesAfterStale.length, 3); assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName()); @@ -175,133 +188,128 @@ public void testReadSelectNonStaleDatanode() throws Exception { } } - /** test getBlocks */ + /** + * Test getBlocks. 
+ */ @Test public void testGetBlocks() throws Exception { - final Configuration CONF = new HdfsConfiguration(); - - final short REPLICATION_FACTOR = (short) 2; - final int DEFAULT_BLOCK_SIZE = 1024; + DistributedFileSystem fs = null; + Path testFile = null; + BlockWithLocations[] locs; + final int blkSize = 1024; + final String filePath = "/tmp.txt"; + final int blkLocsSize = 13; + long fileLen = 12 * blkSize + 1; + final short replicationFactor = (short) 2; + final Configuration config = new HdfsConfiguration(); + + // set configurations + config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blkSize); + config.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, + blkSize); + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) + .numDataNodes(replicationFactor) + .storagesPerDatanode(4) + .build(); - CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); - CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, - DEFAULT_BLOCK_SIZE); - - MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF) - .numDataNodes(REPLICATION_FACTOR) - .storagesPerDatanode(4) - .build(); try { cluster.waitActive(); // the third block will not be visible to getBlocks - long fileLen = 12 * DEFAULT_BLOCK_SIZE + 1; - DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), - fileLen, REPLICATION_FACTOR, 0L); + testFile = new Path(filePath); + DFSTestUtil.createFile(cluster.getFileSystem(), testFile, + fileLen, replicationFactor, 0L); // get blocks & data nodes - List locatedBlocks; - DatanodeInfo[] dataNodes = null; - boolean notWritten; - final DFSClient dfsclient = new DFSClient( - DFSUtilClient.getNNAddress(CONF), CONF); - do { - locatedBlocks = dfsclient.getNamenode() - .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks(); - assertEquals(13, locatedBlocks.size()); - notWritten = false; - for (int i = 0; i < 2; i++) { - dataNodes = locatedBlocks.get(i).getLocations(); - if (dataNodes.length != REPLICATION_FACTOR) { - notWritten = true; - try { - Thread.sleep(10); - } catch (InterruptedException e) { - } - break; - } - } - } while (notWritten); - dfsclient.close(); - + fs = cluster.getFileSystem(); + DFSTestUtil.waitForReplication(fs, testFile, replicationFactor, 60000); + RemoteIterator it = fs.listLocatedStatus(testFile); + LocatedFileStatus stat = it.next(); + BlockLocation[] blockLocations = stat.getBlockLocations(); + assertEquals(blkLocsSize, blockLocations.length); + HdfsDataInputStream dis = (HdfsDataInputStream) fs.open(testFile); + Collection dinfo = dis.getAllBlocks(); + dis.close(); + DatanodeInfo[] dataNodes = dinfo.iterator().next().getLocations(); // get RPC client to namenode InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort()); - NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, + NamenodeProtocol namenode = NameNodeProxies.createProxy(config, DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy(); - // get blocks of size fileLen from dataNodes[0], with minBlockSize as - // fileLen - BlockWithLocations[] locs; - // Should return all 13 blocks, as minBlockSize is not passed - locs = namenode.getBlocks(dataNodes[0], fileLen, 0) - .getBlocks(); - assertEquals(13, locs.length); - assertEquals(locs[0].getStorageIDs().length, 2); - assertEquals(locs[1].getStorageIDs().length, 2); - - // Should return 12 blocks, as minBlockSize is DEFAULT_BLOCK_SIZE - locs = namenode.getBlocks(dataNodes[0], fileLen, DEFAULT_BLOCK_SIZE) - .getBlocks(); - assertEquals(12, locs.length); - 
assertEquals(locs[0].getStorageIDs().length, 2); - assertEquals(locs[1].getStorageIDs().length, 2); + locs = namenode.getBlocks(dataNodes[0], fileLen, 0).getBlocks(); + assertEquals(blkLocsSize, locs.length); + + assertEquals(locs[0].getStorageIDs().length, replicationFactor); + assertEquals(locs[1].getStorageIDs().length, replicationFactor); + + // Should return 12 blocks, as minBlockSize is blkSize + locs = namenode.getBlocks(dataNodes[0], fileLen, blkSize).getBlocks(); + assertEquals(blkLocsSize - 1, locs.length); + assertEquals(locs[0].getStorageIDs().length, replicationFactor); + assertEquals(locs[1].getStorageIDs().length, replicationFactor); // get blocks of size BlockSize from dataNodes[0] - locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE, - DEFAULT_BLOCK_SIZE).getBlocks(); + locs = namenode.getBlocks(dataNodes[0], blkSize, + blkSize).getBlocks(); assertEquals(locs.length, 1); - assertEquals(locs[0].getStorageIDs().length, 2); + assertEquals(locs[0].getStorageIDs().length, replicationFactor); // get blocks of size 1 from dataNodes[0] locs = namenode.getBlocks(dataNodes[0], 1, 1).getBlocks(); assertEquals(locs.length, 1); - assertEquals(locs[0].getStorageIDs().length, 2); + assertEquals(locs[0].getStorageIDs().length, replicationFactor); // get blocks of size 0 from dataNodes[0] - getBlocksWithException(namenode, dataNodes[0], 0, 0); + getBlocksWithException(namenode, dataNodes[0], 0, 0, + RemoteException.class, "IllegalArgumentException"); // get blocks of size -1 from dataNodes[0] - getBlocksWithException(namenode, dataNodes[0], -1, 0); + getBlocksWithException(namenode, dataNodes[0], -1, 0, + RemoteException.class, "IllegalArgumentException"); // minBlockSize is -1 - getBlocksWithException(namenode, dataNodes[0], DEFAULT_BLOCK_SIZE, -1); + getBlocksWithException(namenode, dataNodes[0], blkSize, -1, + RemoteException.class, "IllegalArgumentException"); // get blocks of size BlockSize from a non-existent datanode DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4"); - getBlocksWithIncorrectDatanodeException(namenode, info, 2, 0); - + getBlocksWithException(namenode, info, replicationFactor, 0, + RemoteException.class, "HadoopIllegalArgumentException"); testBlockIterator(cluster); - } finally { + + // Namenode should refuse to provide block locations to the balancer + // while in safemode. + locs = namenode.getBlocks(dataNodes[0], fileLen, 0).getBlocks(); + assertEquals(blkLocsSize, locs.length); + assertFalse(fs.isInSafeMode()); + LOG.info("Entering safe mode"); + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + LOG.info("Entered safe mode"); + assertTrue(fs.isInSafeMode()); + getBlocksWithException(namenode, info, replicationFactor, 0, + RemoteException.class, + "Cannot execute getBlocks. 
Name node is in safe mode."); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + assertFalse(fs.isInSafeMode()); + } finally { + if (fs != null) { + fs.delete(testFile, true); + fs.close(); + } cluster.shutdown(); } } private void getBlocksWithException(NamenodeProtocol namenode, - DatanodeInfo datanode, long size, long minBlockSize) throws IOException { - boolean getException = false; - try { - namenode.getBlocks(datanode, size, minBlockSize); - } catch (RemoteException e) { - getException = true; - assertTrue(e.getClassName().contains("IllegalArgumentException")); - } - assertTrue(getException); - } + DatanodeInfo datanode, long size, long minBlkSize, Class exClass, + String msg) throws Exception { - private void getBlocksWithIncorrectDatanodeException( - NamenodeProtocol namenode, DatanodeInfo datanode, long size, - long minBlockSize) - throws IOException { - boolean getException = false; - try { - namenode.getBlocks(datanode, size, minBlockSize); - } catch (RemoteException e) { - getException = true; - assertTrue(e.getClassName().contains("HadoopIllegalArgumentException")); - } - assertTrue(getException); + + // The namenode should refuse the request and the call should fail. + LambdaTestUtils.intercept(exClass, + msg, () -> namenode.getBlocks(datanode, size, minBlkSize)); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java index 3463f573797e4..bc763d42fd480 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Set; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.ClassUtils; import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer; import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java index 783531d3613f7..2fabbc50a2f10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java @@ -50,7 +50,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestHdfsAdmin { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java index c82b47cec94df..ca3065088c40b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java @@ -17,32 +17,46 @@ */ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.util.EnumSet; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; +import
org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.DataChecksum; import org.junit.After; import org.junit.Test; @@ -276,8 +290,22 @@ public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception { */ @Test public void testLeaseRecoveryAndAppend() throws Exception { + testLeaseRecoveryAndAppend(new Configuration()); + } + + /** + * Recover the lease on a file and append file from another client with + * ViewDFS enabled. + */ + @Test + public void testLeaseRecoveryAndAppendWithViewDFS() throws Exception { Configuration conf = new Configuration(); - try{ + conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName()); + testLeaseRecoveryAndAppend(conf); + } + + private void testLeaseRecoveryAndAppend(Configuration conf) throws Exception { + try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Path file = new Path("/testLeaseRecovery"); DistributedFileSystem dfs = cluster.getFileSystem(); @@ -314,4 +342,266 @@ public void testLeaseRecoveryAndAppend() throws Exception { } } } + + /** + * HDFS-14498 - test lease can be recovered for a file where the final + * block was never registered with the DNs, and hence the IBRs will never + * be received. In this case the final block should be zero bytes and can + * be removed. 
+ */ + @Test + public void testLeaseRecoveryEmptyCommittedLastBlock() throws Exception { + Configuration conf = new Configuration(); + DFSClient client = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + DistributedFileSystem dfs = cluster.getFileSystem(); + client = + new DFSClient(cluster.getNameNode().getServiceRpcAddress(), conf); + String file = "/test/f1"; + Path filePath = new Path(file); + + createCommittedNotCompleteFile(client, file, null, 1); + + INodeFile inode = cluster.getNamesystem().getFSDirectory() + .getINode(filePath.toString()).asFile(); + assertTrue(inode.isUnderConstruction()); + assertEquals(1, inode.numBlocks()); + assertNotNull(inode.getLastBlock()); + + // Ensure a different client cannot append the file + try { + dfs.append(filePath); + fail("Append to a file(lease is held by another client) should fail"); + } catch (RemoteException e) { + assertTrue(e.getMessage().contains("file lease is currently owned")); + } + + // Lease will not be recovered on the first try + assertEquals(false, client.recoverLease(file)); + for (int i=0; i < 10 && !client.recoverLease(file); i++) { + Thread.sleep(1000); + } + assertTrue(client.recoverLease(file)); + + inode = cluster.getNamesystem().getFSDirectory() + .getINode(filePath.toString()).asFile(); + assertTrue(!inode.isUnderConstruction()); + assertEquals(0, inode.numBlocks()); + assertNull(inode.getLastBlock()); + + // Ensure the recovered file can now be written + FSDataOutputStream append = dfs.append(filePath); + append.write("test".getBytes()); + append.close(); + } finally { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + if (client != null) { + client.close(); + } + } + } + + /** + * HDFS-14498 - similar to testLeaseRecoveryEmptyCommittedLastBlock except + * we wait for the lease manager to recover the lease automatically. + */ + @Test + public void testLeaseManagerRecoversEmptyCommittedLastBlock() + throws Exception { + Configuration conf = new Configuration(); + DFSClient client = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + client = + new DFSClient(cluster.getNameNode().getServiceRpcAddress(), conf); + String file = "/test/f1"; + + createCommittedNotCompleteFile(client, file, null, 1); + waitLeaseRecovery(cluster); + + GenericTestUtils.waitFor(() -> { + String holder = NameNodeAdapter + .getLeaseHolderForPath(cluster.getNameNode(), file); + return holder == null; + }, 100, 10000); + + } finally { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + if (client != null) { + client.close(); + } + } + } + + @Test + public void testAbortedRecovery() throws Exception { + Configuration conf = new Configuration(); + DFSClient client = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + client = + new DFSClient(cluster.getNameNode().getServiceRpcAddress(), conf); + final String file = "/test/f1"; + + HdfsFileStatus stat = client.getNamenode() + .create(file, new FsPermission("777"), client.clientName, + new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), + true, (short) 1, 1024 * 1024 * 128L, + new CryptoProtocolVersion[0], null, null); + + assertNotNull(NameNodeAdapter.getLeaseHolderForPath( + cluster.getNameNode(), file)); + + // Add a block to the file + ExtendedBlock block = client.getNamenode().addBlock( + file, client.clientName, null, new DatanodeInfo[0], stat.getFileId(), + new String[0], null).getBlock(); + + // update the pipeline to get a new genstamp. 
+ ExtendedBlock updatedBlock = client.getNamenode() + .updateBlockForPipeline(block, client.clientName) + .getBlock(); + // Fake that some data may have been written. Commit block sync will + // reconcile. + updatedBlock.setNumBytes(1234); + + // Get the stored block and make it look like the DN sent a RBW IBR. + BlockManager bm = cluster.getNamesystem().getBlockManager(); + BlockInfo storedBlock = bm.getStoredBlock(block.getLocalBlock()); + BlockUnderConstructionFeature uc = + storedBlock.getUnderConstructionFeature(); + uc.setExpectedLocations(updatedBlock.getLocalBlock(), + uc.getExpectedStorageLocations(), BlockType.CONTIGUOUS); + + // Complete the file without updatePipeline to simulate a client failure. + client.getNamenode().complete(file, client.clientName, block, + stat.getFileId()); + + assertNotNull(NameNodeAdapter.getLeaseHolderForPath( + cluster.getNameNode(), file)); + + cluster.setLeasePeriod(LEASE_PERIOD, LEASE_PERIOD); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + String holder = NameNodeAdapter + .getLeaseHolderForPath(cluster.getNameNode(), file); + return holder == null; + } + }, 100, 20000); + // Nothing was actually written, so the block should be dropped. + assertTrue(storedBlock.isDeleted()); + } finally { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + if (client != null) { + client.close(); + } + } + } + + @Test + public void testLeaseManagerRecoversCommittedLastBlockWithContent() + throws Exception { + Configuration conf = new Configuration(); + DFSClient client = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + client = + new DFSClient(cluster.getNameNode().getServiceRpcAddress(), conf); + String file = "/test/f2"; + + byte[] bytesToWrite = new byte[1]; + bytesToWrite[0] = 123; + createCommittedNotCompleteFile(client, file, bytesToWrite, 3); + + waitLeaseRecovery(cluster); + + DistributedFileSystem hdfs = cluster.getFileSystem(); + + // Now the lease has been recovered, attempt to append the file and then + // ensure both the earlier written and newly written data can be read back. + FSDataOutputStream op = null; + try { + op = hdfs.append(new Path(file)); + op.write(23); + } finally { + if (op != null) { + op.close(); + } + } + + FSDataInputStream stream = null; + try { + stream = cluster.getFileSystem().open(new Path(file)); + assertEquals(123, stream.readByte()); + assertEquals(23, stream.readByte()); + } finally { + stream.close(); + } + + // Finally, check there are no leases for the file and hence the file is + // closed.
+ GenericTestUtils.waitFor(() -> { + String holder = NameNodeAdapter + .getLeaseHolderForPath(cluster.getNameNode(), file); + return holder == null; + }, 100, 10000); + + } finally { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + if (client != null) { + client.close(); + } + } + } + + private void createCommittedNotCompleteFile(DFSClient client, String file, + byte[] bytesToWrite, int repFactor) throws IOException { + HdfsFileStatus stat = client.getNamenode() + .create(file, new FsPermission("777"), client.clientName, + new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), + true, (short) repFactor, 1024 * 1024 * 128L, + new CryptoProtocolVersion[0], null, null); + // Add a block to the file + LocatedBlock blk = client.getNamenode() + .addBlock(file, client.clientName, null, + new DatanodeInfo[0], stat.getFileId(), new String[0], null); + ExtendedBlock finalBlock = blk.getBlock(); + if (bytesToWrite != null) { + // Here we create a output stream and then abort it so the block gets + // created on the datanode, but we never send the message to tell the DN + // to complete the block. This simulates the client crashing after it + // wrote the data, but before the file gets closed. + DFSOutputStream s = new DFSOutputStream(client, file, stat, + EnumSet.of(CreateFlag.CREATE), null, + DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512), + null, true); + s.start(); + s.write(bytesToWrite); + s.hflush(); + finalBlock = s.getBlock(); + s.abort(); + } + // Attempt to close the file. This will fail (return false) as the NN will + // be expecting the registered block to be reported from the DNs via IBR, + // but that will never happen, as we either did not write it, or we aborted + // the stream preventing the "close block" message to be sent to the DN. 
+ boolean closed = client.getNamenode().complete( + file, client.clientName, finalBlock, stat.getFileId()); + assertEquals(false, closed); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 662b0e6996160..bfa3deaa6b1b4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -29,7 +29,7 @@ import java.util.HashMap; import java.util.Map; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index c87a6d17e8dc1..f6491ef039ce9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import java.util.function.Supplier; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java index fcff0e2fa49b9..265d410729017 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java @@ -60,8 +60,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This class tests node maintenance. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index d60e0253a8d74..74a8e44bf7b0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -42,7 +42,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Tests MiniDFS cluster setup/teardown and isolation. 
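A recurring mechanical change in the hunks above is the swap of com.google.common.base.Supplier for java.util.function.Supplier, which lets GenericTestUtils.waitFor take a plain lambda as its polling condition, as the lease recovery tests earlier in this diff already do. The snippet below is only an illustrative sketch of that pattern and is not part of the patch; the helper name waitForLeaseRelease and the surrounding test-class context (a running MiniDFSCluster) are assumed for the example.

import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;

// Poll until the NameNode no longer holds a lease for the given path.
// The lambda is a java.util.function.Supplier<Boolean>; waitFor evaluates it
// every 100 ms and throws TimeoutException after 10 seconds.
private static void waitForLeaseRelease(MiniDFSCluster cluster, String file)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(
      () -> NameNodeAdapter.getLeaseHolderForPath(
          cluster.getNameNode(), file) == null,
      100, 10000);
}

The same lambda-based polling appears verbatim in the lease recovery tests above, so no Guava Supplier import is needed once the migration is applied.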
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java index db42dcc254e2e..d536c5e8a969a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; @@ -251,55 +250,33 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception { clientConf.set(HADOOP_RPC_PROTECTION, "privacy"); FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf); doTest(fsPrivacy, PATH1); - for (int i = 0; i < 2; i++) { - DataNode dn = dataNodes.get(i); - SaslDataTransferClient saslClient = dn.getSaslClient(); - String qop = null; - // It may take some time for the qop to populate - // to all DNs, check in a loop. - for (int trial = 0; trial < 10; trial++) { - qop = saslClient.getTargetQOP(); - if (qop != null) { - break; - } - Thread.sleep(100); - } - assertEquals("auth", qop); - } + long count = dataNodes.stream() + .map(dn -> dn.getSaslClient().getTargetQOP()) + .filter("auth"::equals) + .count(); + // For each datanode pipeline, targetQOPs of sasl clients in the first two + // datanodes become equal to auth. + // Note that it is not necessarily the case for all datanodes, + // since a datanode may be always at the last position in pipelines. 
+ assertTrue("At least two qops should be auth", count >= 2); clientConf.set(HADOOP_RPC_PROTECTION, "integrity"); FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf); doTest(fsIntegrity, PATH2); - for (int i = 0; i < 2; i++) { - DataNode dn = dataNodes.get(i); - SaslDataTransferClient saslClient = dn.getSaslClient(); - String qop = null; - for (int trial = 0; trial < 10; trial++) { - qop = saslClient.getTargetQOP(); - if (qop != null) { - break; - } - Thread.sleep(100); - } - assertEquals("auth", qop); - } + count = dataNodes.stream() + .map(dn -> dn.getSaslClient().getTargetQOP()) + .filter("auth"::equals) + .count(); + assertTrue("At least two qops should be auth", count >= 2); clientConf.set(HADOOP_RPC_PROTECTION, "authentication"); FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf); doTest(fsAuth, PATH3); - for (int i = 0; i < 3; i++) { - DataNode dn = dataNodes.get(i); - SaslDataTransferServer saslServer = dn.getSaslServer(); - String qop = null; - for (int trial = 0; trial < 10; trial++) { - qop = saslServer.getNegotiatedQOP(); - if (qop != null) { - break; - } - Thread.sleep(100); - } - assertEquals("auth", qop); - } + count = dataNodes.stream() + .map(dn -> dn.getSaslServer().getNegotiatedQOP()) + .filter("auth"::equals) + .count(); + assertEquals("All qops should be auth", 3, count); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java index 4a9949340faae..c498160950ead 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java @@ -76,14 +76,15 @@ public class TestPersistBlocks { /** check if DFS remains in proper condition after a restart **/ @Test - public void TestRestartDfsWithFlush() throws Exception { + public void testRestartDfsWithFlush() throws Exception { testRestartDfs(true); } /** check if DFS remains in proper condition after a restart **/ - public void TestRestartDfsWithSync() throws Exception { + @Test + public void testRestartDfsWithSync() throws Exception { testRestartDfs(false); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index 0a64e268b67d8..ea332c85d2394 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -58,7 +58,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.LoggerFactory; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 7754b53653cf4..79088d3be8555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -36,7 +36,7 @@ import java.util.List; import java.util.Scanner; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FSDataOutputStream; @@ -64,8 +64,8 @@ import org.junit.Rule; import org.junit.Test; -import com.google.common.base.Charsets; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.rules.Timeout; import org.slf4j.Logger; import org.slf4j.event.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 2abfff7876c13..5e29868fdcde5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; @@ -34,6 +35,12 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.datanode.erasurecode.ErasureCodingTestHelper; +import org.apache.hadoop.io.ElasticByteBufferPool; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -514,6 +521,8 @@ private void testNNSendsErasureCodingTasks(int deadDN) throws Exception { @Test(timeout = 180000) public void testErasureCodingWorkerXmitsWeight() throws Exception { + testErasureCodingWorkerXmitsWeight(0.5f, + (int) (ecPolicy.getNumDataUnits() * 0.5f)); testErasureCodingWorkerXmitsWeight(1f, ecPolicy.getNumDataUnits()); testErasureCodingWorkerXmitsWeight(0f, 1); testErasureCodingWorkerXmitsWeight(10f, 10 * ecPolicy.getNumDataUnits()); @@ -538,7 +547,7 @@ private void testErasureCodingWorkerXmitsWeight( writeFile(fs, "/ec-xmits-weight", fileLen); DataNode dn = cluster.getDataNodes().get(0); - int corruptBlocks = dn.getFSDataset().getFinalizedBlocks( + int corruptBlocks = dn.getFSDataset().getSortedFinalizedBlocks( cluster.getNameNode().getNamesystem().getBlockPoolId()).size(); int expectedXmits = corruptBlocks * expectedWeight; @@ -567,6 +576,243 @@ public void stripedBlockReconstruction() throws IOException { } finally { barrier.await(); DataNodeFaultInjector.set(oldInjector); + for (final DataNode curDn : cluster.getDataNodes()) { + GenericTestUtils.waitFor(() -> curDn.getXceiverCount() <= 1, 10, 60000); + assertEquals(0, curDn.getXmitsInProgress()); + } + } + } + + /** + * When a StripedBlockReader times out, the outdated future should be ignored. + * Otherwise an NPE will be thrown, which stops reading the remaining data and + * makes the reconstruction task fail.
+ */ + @Test(timeout = 120000) + public void testTimeoutReadBlockInReconstruction() throws Exception { + assumeTrue("Ignore case where num parity units <= 1", + ecPolicy.getNumParityUnits() > 1); + int stripedBufferSize = conf.getInt( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY, + cellSize); + ErasureCodingPolicy policy = ecPolicy; + fs.enableErasureCodingPolicy(policy.getName()); + fs.getClient().setErasureCodingPolicy("/", policy.getName()); + + // StripedBlockReconstructor#reconstruct will loop 2 times + final int fileLen = stripedBufferSize * 2 * ecPolicy.getNumDataUnits(); + String fileName = "/timeout-read-block"; + Path file = new Path(fileName); + writeFile(fs, fileName, fileLen); + fs.getFileBlockLocations(file, 0, fileLen); + + LocatedBlocks locatedBlocks = + StripedFileTestUtil.getLocatedBlocks(file, fs); + Assert.assertEquals(1, locatedBlocks.getLocatedBlocks().size()); + // The file only has one block group + LocatedBlock lblock = locatedBlocks.get(0); + DatanodeInfo[] datanodeinfos = lblock.getLocations(); + + // to reconstruct first block + DataNode dataNode = cluster.getDataNode(datanodeinfos[0].getIpcPort()); + + int stripedReadTimeoutInMills = conf.getInt( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY, + DFSConfigKeys. + DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT); + Assert.assertTrue( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY + + " must be greater than 2000", + stripedReadTimeoutInMills > 2000); + + DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get(); + DataNodeFaultInjector timeoutInjector = new DataNodeFaultInjector() { + private AtomicInteger numDelayReader = new AtomicInteger(0); + + @Override + public void delayBlockReader() { + int index = numDelayReader.incrementAndGet(); + LOG.info("Delay the {}th read block", index); + + // the file's first StripedBlockReconstructor#reconstruct, + // and the first reader will timeout + if (index == 1) { + try { + GenericTestUtils.waitFor(() -> numDelayReader.get() >= + ecPolicy.getNumDataUnits() + 1, 50, + stripedReadTimeoutInMills * 3 + ); + } catch (TimeoutException e) { + Assert.fail("Can't reconstruct the file's first part."); + } catch (InterruptedException e) { + } + } + // stop all the following re-reconstruction tasks + if (index > 3 * ecPolicy.getNumDataUnits() + 1) { + while (true) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + } + } + }; + DataNodeFaultInjector.set(timeoutInjector); + + try { + shutdownDataNode(dataNode); + // before HDFS-15240, an NPE would cause the reconstruction to fail (test timeout) + StripedFileTestUtil + .waitForReconstructionFinished(file, fs, groupSize); + } finally { + DataNodeFaultInjector.set(oldInjector); + } + } + + /** + * When a block reader times out, the outdated future should be ignored. + * Otherwise the ByteBuffer would be written to after being given back to the + * BufferPool. This test ensures that the block reader is closed + * before the buffer is freed.
+ */ + @Test(timeout = 120000) + public void testAbnormallyCloseDoesNotWriteBufferAgain() throws Exception { + assumeTrue("Ignore case where num parity units <= 1", + ecPolicy.getNumParityUnits() > 1); + int stripedBufferSize = conf.getInt( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY, + cellSize); + // StripedBlockReconstructor#reconstruct will loop 2 times + final int fileLen = stripedBufferSize * 2 * ecPolicy.getNumDataUnits(); + String fileName = "/no-dirty-buffer"; + Path file = new Path(fileName); + writeFile(fs, fileName, fileLen); + fs.getFileBlockLocations(file, 0, fileLen); + + LocatedBlocks locatedBlocks = + StripedFileTestUtil.getLocatedBlocks(file, fs); + Assert.assertEquals(1, locatedBlocks.getLocatedBlocks().size()); + // The file only has one block group + LocatedBlock lblock = locatedBlocks.get(0); + DatanodeInfo[] datanodeinfos = lblock.getLocations(); + + // to reconstruct first block + DataNode dataNode = cluster.getDataNode(datanodeinfos[0].getIpcPort()); + + int stripedReadTimeoutInMills = conf.getInt( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY, + DFSConfigKeys. + DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT); + Assert.assertTrue( + DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY + + " must be greater than 2000", + stripedReadTimeoutInMills > 2000); + + ElasticByteBufferPool bufferPool = + (ElasticByteBufferPool) ErasureCodingTestHelper.getBufferPool(); + emptyBufferPool(bufferPool, true); + emptyBufferPool(bufferPool, false); + + AtomicInteger finishedReadBlock = new AtomicInteger(0); + + DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get(); + DataNodeFaultInjector timeoutInjector = new DataNodeFaultInjector() { + private AtomicInteger numDelayReader = new AtomicInteger(0); + private AtomicBoolean continueRead = new AtomicBoolean(false); + private AtomicBoolean closeByNPE = new AtomicBoolean(false); + + @Override + public void delayBlockReader() { + int index = numDelayReader.incrementAndGet(); + LOG.info("Delay the {}th read block", index); + + // the file's first StripedBlockReconstructor#reconstruct, + // and the first reader will timeout + if (index == 1) { + try { + GenericTestUtils.waitFor(() -> numDelayReader.get() >= + ecPolicy.getNumDataUnits() + 1, 50, + stripedReadTimeoutInMills * 3 + ); + } catch (TimeoutException e) { + Assert.fail("Can't reconstruct the file's first part."); + } catch (InterruptedException e) { + } + } + if (index > ecPolicy.getNumDataUnits() + 1) { + try { + GenericTestUtils.waitFor( + () -> { + LOG.info("Close by NPE: {}, continue read: {}", + closeByNPE, continueRead); + return closeByNPE.get() ? 
continueRead.get() + : index == finishedReadBlock.get() + 1; }, 5, + stripedReadTimeoutInMills * 3 + ); + } catch (TimeoutException e) { + Assert.fail("Can't reconstruct the file's remaining part."); + } catch (InterruptedException e) { + } + } + } + + @Override + public void interceptBlockReader() { + int n = finishedReadBlock.incrementAndGet(); + LOG.info("Intercept the end of {}th read block.", n); + } + + private AtomicInteger numFreeBuffer = new AtomicInteger(0); + @Override + public void interceptFreeBlockReaderBuffer() { + closeByNPE.compareAndSet(false, true); + int num = numFreeBuffer.incrementAndGet(); + LOG.info("Intercept the {} free block buffer.", num); + if (num >= ecPolicy.getNumDataUnits() + 1) { + continueRead.compareAndSet(false, true); + try { + GenericTestUtils.waitFor(() -> finishedReadBlock.get() >= + 2 * ecPolicy.getNumDataUnits() + 1, 50, + stripedReadTimeoutInMills * 3 + ); + } catch (TimeoutException e) { + Assert.fail("Can't finish the file's reconstruction."); + } catch (InterruptedException e) { + } + } + } + }; + DataNodeFaultInjector.set(timeoutInjector); + try { + shutdownDataNode(dataNode); + // at least one timeout reader + GenericTestUtils.waitFor(() -> finishedReadBlock.get() >= + 2 * ecPolicy.getNumDataUnits() + 1, 50, + stripedReadTimeoutInMills * 3 + ); + + assertBufferPoolIsEmpty(bufferPool, false); + assertBufferPoolIsEmpty(bufferPool, true); + StripedFileTestUtil.waitForReconstructionFinished(file, fs, groupSize); + } finally { + DataNodeFaultInjector.set(oldInjector); + } + } + + private void assertBufferPoolIsEmpty(ElasticByteBufferPool bufferPool, + boolean direct) { + while (bufferPool.size(direct) != 0) { + // iterate all ByteBuffers in ElasticByteBufferPool + ByteBuffer byteBuffer = bufferPool.getBuffer(direct, 0); + Assert.assertEquals(0, byteBuffer.position()); + } + } + + private void emptyBufferPool(ElasticByteBufferPool bufferPool, + boolean direct) { + while (bufferPool.size(direct) != 0) { + bufferPool.getBuffer(direct, 0); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 2e455f7d34691..a63eb421eed88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.IOException; import java.util.Arrays; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index 6af710673704d..9a9ab1b9e415a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -24,7 +24,7 @@ import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.IOException; import java.io.InputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 
7fbf222e7abd0..b741ec695d14e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -59,8 +59,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests to verify safe mode correctness. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index a43cc52a69402..21af9ec597261 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java index a8e2a7114ab93..cce145454578c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java @@ -32,7 +32,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys. DFS_DATA_TRANSFER_PROTECTION_KEY; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java new file mode 100644 index 0000000000000..da0cb59a19376 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.test.Whitebox; +import org.junit.Test; + +import java.io.IOException; +import java.net.URI; + +public class TestViewDistributedFileSystem extends TestDistributedFileSystem{ + @Override + HdfsConfiguration getTestConfiguration() { + HdfsConfiguration conf = super.getTestConfiguration(); + conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName()); + return conf; + } + + @Override + public void testStatistics() throws IOException { + FileSystem.getStatistics(HdfsConstants.HDFS_URI_SCHEME, + ViewDistributedFileSystem.class).reset(); + @SuppressWarnings("unchecked") + ThreadLocal data = + (ThreadLocal) Whitebox + .getInternalState(FileSystem + .getStatistics(HdfsConstants.HDFS_URI_SCHEME, + ViewDistributedFileSystem.class), "threadData"); + data.set(null); + super.testStatistics(); + } + + @Test + public void testOpenWithPathHandle() throws Exception { + Configuration conf = getTestConfiguration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + FileSystem fileSys = cluster.getFileSystem(); + Path openTestPath = new Path("/testOpen"); + fileSys.create(openTestPath).close(); + PathHandle pathHandle = + fileSys.getPathHandle(fileSys.getFileStatus(openTestPath)); + fileSys.open(pathHandle, 1024).close(); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + @Override + public void testEmptyDelegationToken() throws IOException { + Configuration conf = getTestConfiguration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + URI defaultUri = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + ConfigUtil.addLinkFallback(conf, defaultUri.getHost(), defaultUri); + try (FileSystem fileSys = FileSystem.get(conf)) { + fileSys.getDelegationToken(""); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java new file mode 100644 index 0000000000000..810c4cb61ffc8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemContractBaseTest; +import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.net.URI; + +public class TestViewDistributedFileSystemContract + extends TestHDFSFileSystemContract { + private static MiniDFSCluster cluster; + private static String defaultWorkingDirectory; + private static Configuration conf = new HdfsConfiguration(); + + @BeforeClass + public static void init() throws IOException { + final File basedir = GenericTestUtils.getRandomizedTestDir(); + conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, + FileSystemContractBaseTest.TEST_UMASK); + cluster = new MiniDFSCluster.Builder(conf, basedir) + .numDataNodes(2) + .build(); + defaultWorkingDirectory = + "/user/" + UserGroupInformation.getCurrentUser().getShortUserName(); + } + + @Before + public void setUp() throws Exception { + conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName()); + URI defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + ConfigUtil.addLink(conf, defaultFSURI.getHost(), "/user", + defaultFSURI); + ConfigUtil.addLinkFallback(conf, defaultFSURI.getHost(), + defaultFSURI); + fs = FileSystem.get(conf); + } + + @AfterClass + public static void tearDownAfter() throws Exception { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Override + protected String getDefaultWorkingDirectory() { + return defaultWorkingDirectory; + } + + @Test + public void testRenameRootDirForbidden() throws Exception { + LambdaTestUtils.intercept(AccessControlException.class, + "InternalDir of ViewFileSystem is readonly", () -> { + super.testRenameRootDirForbidden(); + }); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java new file mode 100644 index 0000000000000..1e66252d5171a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.apache.hadoop.fs.viewfs.TestViewFileSystemOverloadSchemeWithHdfsScheme; +import org.apache.hadoop.fs.viewfs.ViewFsTestSetup; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.net.URI; + +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME; +import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT; + +public class TestViewDistributedFileSystemWithMountLinks extends + TestViewFileSystemOverloadSchemeWithHdfsScheme { + @Override + public void setUp() throws IOException { + super.setUp(); + Configuration conf = getConf(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, + true); + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1); + conf.set("fs.hdfs.impl", + ViewDistributedFileSystem.class.getName()); + conf.setBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME, + CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT); + URI defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + ConfigUtil.addLinkFallback(conf, defaultFSURI.getAuthority(), + new Path(defaultFSURI.toString() + "/").toUri()); + setConf(conf); + } + + @Test(timeout = 30000) + public void testCreateOnRoot() throws Exception { + testCreateOnRoot(true); + } + + @Test(timeout = 30000) + public void testMountLinkWithNonExistentLink() throws Exception { + testMountLinkWithNonExistentLink(false); + } + + @Test + public void testRenameOnInternalDirWithFallback() throws Exception { + Configuration conf = getConf(); + URI defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + final Path hdfsTargetPath1 = new Path(defaultFSURI + "/HDFSUser"); + final Path hdfsTargetPath2 = new Path(defaultFSURI + "/NewHDFSUser/next"); + ViewFsTestSetup.addMountLinksToConf(defaultFSURI.getAuthority(), + new String[] {"/HDFSUser", "/NewHDFSUser/next"}, + new String[] {hdfsTargetPath1.toUri().toString(), + hdfsTargetPath2.toUri().toString()}, conf); + //Making sure parent dir structure as mount points available in fallback. 
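ConfigUtil.addLink and ConfigUtil.addLinkFallback, used in the setup above, boil down to plain mount-table configuration properties; the mkdirs calls just below then make sure the linked directories actually exist in the fallback namespace. A rough sketch of the equivalent raw configuration, assuming a mount table named "ns1" and placeholder target URIs (not the values used by this test):

  import org.apache.hadoop.conf.Configuration;

  // Illustrative sketch only: the raw mount-table properties behind helpers
  // such as ConfigUtil.addLink/addLinkFallback. "ns1" and the target URIs are
  // placeholders, not values used by the test above.
  public class MountTableConfSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // A regular mount point: /HDFSUser resolves to a directory in HDFS.
      conf.set("fs.viewfs.mounttable.ns1.link./HDFSUser", "hdfs://ns1/HDFSUser");
      // The fallback: paths not covered by any mount point fall through to this
      // file system, which is why the parent directories must exist there.
      conf.set("fs.viewfs.mounttable.ns1.linkFallback", "hdfs://ns1/");
      System.out.println(conf.get("fs.viewfs.mounttable.ns1.linkFallback"));
    }
  }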
+ try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(defaultFSURI, conf); + dfs.mkdirs(hdfsTargetPath1); + dfs.mkdirs(hdfsTargetPath2); + } + + try (FileSystem fs = FileSystem.get(conf)) { + Path src = new Path("/newFileOnRoot"); + Path dst = new Path("/newFileOnRoot1"); + fs.create(src).close(); + verifyRename(fs, src, dst); + + src = new Path("/newFileOnRoot1"); + dst = new Path("/NewHDFSUser/newFileOnRoot"); + fs.mkdirs(dst.getParent()); + verifyRename(fs, src, dst); + + src = new Path("/NewHDFSUser/newFileOnRoot"); + dst = new Path("/NewHDFSUser/newFileOnRoot1"); + verifyRename(fs, src, dst); + + src = new Path("/NewHDFSUser/newFileOnRoot1"); + dst = new Path("/newFileOnRoot"); + verifyRename(fs, src, dst); + + src = new Path("/HDFSUser/newFileOnRoot1"); + dst = new Path("/HDFSUser/newFileOnRoot"); + fs.create(src).close(); + verifyRename(fs, src, dst); + } + } + + @Test + public void testRenameWhenDstOnInternalDirWithFallback() throws Exception { + Configuration conf = getConf(); + URI defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + final Path hdfsTargetPath1 = new Path(defaultFSURI + "/HDFSUser"); + final Path hdfsTargetPath2 = + new Path(defaultFSURI + "/dstNewHDFSUser" + "/next"); + ViewFsTestSetup.addMountLinksToConf(defaultFSURI.getAuthority(), + new String[] {"/InternalDirDoesNotExistInFallback/test", + "/NewHDFSUser/next/next1"}, + new String[] {hdfsTargetPath1.toUri().toString(), + hdfsTargetPath2.toUri().toString()}, conf); + try (DistributedFileSystem dfs = new DistributedFileSystem()) { + dfs.initialize(defaultFSURI, conf); + dfs.mkdirs(hdfsTargetPath1); + dfs.mkdirs(hdfsTargetPath2); + dfs.mkdirs(new Path("/NewHDFSUser/next/next1")); + } + + try (FileSystem fs = FileSystem.get(conf)) { + Path src = new Path("/newFileOnRoot"); + Path dst = new Path("/NewHDFSUser/next"); + fs.create(src).close(); + verifyRename(fs, src, dst); + + src = new Path("/newFileOnRoot"); + dst = new Path("/NewHDFSUser/next/file"); + fs.create(src).close(); + verifyRename(fs, src, dst); + + src = new Path("/newFileOnRoot"); + dst = new Path("/InternalDirDoesNotExistInFallback/file"); + fs.create(src).close(); + // If fallback does not have same structure as internal, rename will fail. 
+ Assert.assertFalse(fs.rename(src, dst)); + } + } + + private void verifyRename(FileSystem fs, Path src, Path dst) + throws IOException { + fs.rename(src, dst); + Assert.assertFalse(fs.exists(src)); + Assert.assertTrue(fs.exists(dst)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 621bd514ee155..2b90c92388f32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -51,9 +51,9 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import com.google.common.base.Preconditions; -import com.google.common.io.Files; -import com.google.common.primitives.Bytes; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; /** * This class defines a number of static helper methods used by the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java index 6b04b14f49a77..fd9963e57fb54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java @@ -69,7 +69,7 @@ import org.junit.Rule; import org.junit.Test; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; import org.slf4j.Logger; @@ -389,7 +389,7 @@ private void testShortCircuitCacheUnbufferWithDisableInterval( try (FSDataInputStream in = dfs.open(testFile)) { Assert.assertEquals(0, - dfs.getClient().getClientContext().getShortCircuitCache() + dfs.getClient().getClientContext().getShortCircuitCache(0) .getReplicaInfoMapSize()); final byte[] buf = new byte[testFileLen]; @@ -398,12 +398,12 @@ private void testShortCircuitCacheUnbufferWithDisableInterval( // Set cache size to 0 so the replica marked evictable by unbuffer // will be purged immediately. 
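The switch from getShortCircuitCache() to getShortCircuitCache(0), in the hunk above and the hunks below, reflects the client now keeping several ShortCircuitCache instances and choosing one per block. A minimal sketch of that selection idea, under the assumption that the cache index is derived from the block id modulo the number of configured caches; the class below is illustrative, not the HDFS implementation:

  import java.util.ArrayList;
  import java.util.List;

  // Illustrative sketch only: models the idea of picking one of N short-circuit
  // caches by block id; this is not the HDFS implementation.
  public class CacheSelectorSketch {
    static final class Cache {
      final int index;
      Cache(int index) { this.index = index; }
    }

    private final List<Cache> caches = new ArrayList<>();

    CacheSelectorSketch(int numCaches) {
      for (int i = 0; i < numCaches; i++) {
        caches.add(new Cache(i));
      }
    }

    // A block maps deterministically onto one cache, spreading lock contention
    // across several caches instead of funnelling every read through one.
    Cache getCache(long blockId) {
      return caches.get((int) (Math.abs(blockId) % caches.size()));
    }

    public static void main(String[] args) {
      CacheSelectorSketch selector = new CacheSelectorSketch(5);
      System.out.println(selector.getCache(0L).index);           // 0
      System.out.println(selector.getCache(1073741829L).index);  // 4
    }
  }

Under that assumption, passing an explicit id such as 0 in these test hunks simply pins the lookup to one particular cache.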
- dfs.getClient().getClientContext().getShortCircuitCache() + dfs.getClient().getClientContext().getShortCircuitCache(0) .setMaxTotalSize(0); LOG.info("Unbuffering"); in.unbuffer(); Assert.assertEquals(0, - dfs.getClient().getClientContext().getShortCircuitCache() + dfs.getClient().getClientContext().getShortCircuitCache(0) .getReplicaInfoMapSize()); DFSTestUtil.appendFile(dfs, testFile, "append more data"); @@ -432,7 +432,7 @@ private void validateReadResult(final DistributedFileSystem dfs, final int expectedScrRepMapSize) { Assert.assertThat(expected, CoreMatchers.is(actual)); Assert.assertEquals(expectedScrRepMapSize, - dfs.getClient().getClientContext().getShortCircuitCache() + dfs.getClient().getClientContext().getShortCircuitCache(0) .getReplicaInfoMapSize()); } @@ -467,7 +467,7 @@ public void testShortCircuitReadFromServerWithoutShm() throws Exception { calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents, expected)); final ShortCircuitCache cache = - fs.getClient().getClientContext().getShortCircuitCache(); + fs.getClient().getClientContext().getShortCircuitCache(0); final DatanodeInfo datanode = new DatanodeInfoBuilder() .setNodeID(cluster.getDataNodes().get(0).getDatanodeId()) .build(); @@ -516,7 +516,7 @@ public void testShortCircuitReadFromClientWithoutShm() throws Exception { calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents, expected)); final ShortCircuitCache cache = - fs.getClient().getClientContext().getShortCircuitCache(); + fs.getClient().getClientContext().getShortCircuitCache(0); Assert.assertEquals(null, cache.getDfsClientShmManager()); cluster.shutdown(); sockDir.close(); @@ -548,7 +548,7 @@ public void testShortCircuitCacheShutdown() throws Exception { calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents, expected)); final ShortCircuitCache cache = - fs.getClient().getClientContext().getShortCircuitCache(); + fs.getClient().getClientContext().getShortCircuitCache(0); cache.close(); Assert.assertTrue(cache.getDfsClientShmManager(). 
getDomainSocketWatcher().isClosed()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java index 95fb67a1a4e19..534243ddf318a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java @@ -116,7 +116,7 @@ private static void readFully(BlockReaderLocal reader, } private static class BlockReaderLocalTest { - final static int TEST_LENGTH = 12345; + final static int TEST_LENGTH = 1234567; final static int BYTES_PER_CHECKSUM = 512; public void setConfiguration(HdfsConfiguration conf) { @@ -130,10 +130,14 @@ public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { // default: no-op } - } + public void doTest(BlockReaderLocal reader, byte[] original, int shift) + throws IOException { + // default: no-op + } } public void runBlockReaderLocalTest(BlockReaderLocalTest test, - boolean checksum, long readahead) throws IOException { + boolean checksum, long readahead, int shortCircuitCachesNum) + throws IOException { Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null)); MiniDFSCluster cluster = null; HdfsConfiguration conf = new HdfsConfiguration(); @@ -143,10 +147,13 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, BlockReaderLocalTest.BYTES_PER_CHECKSUM); conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C"); conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SHORT_CIRCUIT_NUM, + shortCircuitCachesNum); test.setConfiguration(conf); FileInputStream dataIn = null, metaIn = null; final Path TEST_PATH = new Path("/a"); final long RANDOM_SEED = 4567L; + final int blockSize = 10 * 1024; BlockReaderLocal blockReaderLocal = null; FSDataInputStream fsIn = null; byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; @@ -158,8 +165,8 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); - DFSTestUtil.createFile(fs, TEST_PATH, - BlockReaderLocalTest.TEST_LENGTH, (short)1, RANDOM_SEED); + DFSTestUtil.createFile(fs, TEST_PATH, 1024, + BlockReaderLocalTest.TEST_LENGTH, blockSize, (short)1, RANDOM_SEED); try { DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); } catch (InterruptedException e) { @@ -174,47 +181,52 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, BlockReaderLocalTest.TEST_LENGTH); fsIn.close(); fsIn = null; - ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH); - File dataFile = cluster.getBlockFile(0, block); - File metaFile = cluster.getBlockMetadataFile(0, block); - - ShortCircuitCache shortCircuitCache = - ClientContext.getFromConf(conf).getShortCircuitCache(); + for (int i = 0; i < shortCircuitCachesNum; i++) { + ExtendedBlock block = DFSTestUtil.getAllBlocks( + fs, TEST_PATH).get(i).getBlock(); + File dataFile = cluster.getBlockFile(0, block); + File metaFile = cluster.getBlockMetadataFile(0, block); + + ShortCircuitCache shortCircuitCache = + ClientContext.getFromConf(conf).getShortCircuitCache( + block.getBlockId()); + test.setup(dataFile, checksum); + FileInputStream[] streams = { + new FileInputStream(dataFile), + new FileInputStream(metaFile) + }; + 
dataIn = streams[0]; + metaIn = streams[1]; + ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), + block.getBlockPoolId()); + raf = new RandomAccessFile( + new File(sockDir.getDir().getAbsolutePath(), + UUID.randomUUID().toString()), "rw"); + raf.setLength(8192); + FileInputStream shmStream = new FileInputStream(raf.getFD()); + shm = new ShortCircuitShm(ShmId.createRandom(), shmStream); + ShortCircuitReplica replica = + new ShortCircuitReplica(key, dataIn, metaIn, shortCircuitCache, + Time.now(), shm.allocAndRegisterSlot( + ExtendedBlockId.fromExtendedBlock(block))); + blockReaderLocal = new BlockReaderLocal.Builder( + new DfsClientConf.ShortCircuitConf(conf)). + setFilename(TEST_PATH.getName()). + setBlock(block). + setShortCircuitReplica(replica). + setCachingStrategy(new CachingStrategy(false, readahead)). + setVerifyChecksum(checksum). + build(); + dataIn = null; + metaIn = null; + test.doTest(blockReaderLocal, original, i * blockSize); + // BlockReaderLocal should not alter the file position. + Assert.assertEquals(0, streams[0].getChannel().position()); + Assert.assertEquals(0, streams[1].getChannel().position()); + } cluster.shutdown(); cluster = null; - test.setup(dataFile, checksum); - FileInputStream streams[] = { - new FileInputStream(dataFile), - new FileInputStream(metaFile) - }; - dataIn = streams[0]; - metaIn = streams[1]; - ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), - block.getBlockPoolId()); - raf = new RandomAccessFile( - new File(sockDir.getDir().getAbsolutePath(), - UUID.randomUUID().toString()), "rw"); - raf.setLength(8192); - FileInputStream shmStream = new FileInputStream(raf.getFD()); - shm = new ShortCircuitShm(ShmId.createRandom(), shmStream); - ShortCircuitReplica replica = - new ShortCircuitReplica(key, dataIn, metaIn, shortCircuitCache, - Time.now(), shm.allocAndRegisterSlot( - ExtendedBlockId.fromExtendedBlock(block))); - blockReaderLocal = new BlockReaderLocal.Builder( - new DfsClientConf.ShortCircuitConf(conf)). - setFilename(TEST_PATH.getName()). - setBlock(block). - setShortCircuitReplica(replica). - setCachingStrategy(new CachingStrategy(false, readahead)). - setVerifyChecksum(checksum). - build(); - dataIn = null; - metaIn = null; - test.doTest(blockReaderLocal, original); - // BlockReaderLocal should not alter the file position. 
- Assert.assertEquals(0, streams[0].getChannel().position()); - Assert.assertEquals(0, streams[1].getChannel().position()); + } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); @@ -227,6 +239,11 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, } } + public void runBlockReaderLocalTest(BlockReaderLocalTest test, + boolean checksum, long readahead) throws IOException { + runBlockReaderLocalTest(test, checksum, readahead, 1); + } + private static class TestBlockReaderLocalImmediateClose extends BlockReaderLocalTest { } @@ -242,7 +259,7 @@ private static class TestBlockReaderSimpleReads @Override public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { - byte buf[] = new byte[TEST_LENGTH]; + byte[] buf = new byte[TEST_LENGTH]; reader.readFully(buf, 0, 512); assertArrayRegionsEqual(original, 0, buf, 0, 512); reader.readFully(buf, 512, 512); @@ -291,7 +308,7 @@ private static class TestBlockReaderLocalArrayReads2 @Override public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { - byte buf[] = new byte[TEST_LENGTH]; + byte[] buf = new byte[TEST_LENGTH]; reader.readFully(buf, 0, 10); assertArrayRegionsEqual(original, 0, buf, 0, 10); reader.readFully(buf, 10, 100); @@ -369,7 +386,7 @@ public void testBlockReaderLocalByteBufferReadsNoChecksum() public void testBlockReaderLocalByteBufferReadsNoReadahead() throws IOException { runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(), - true, 0); + true, 0); } @Test @@ -468,7 +485,7 @@ public void setup(File blockFile, boolean usingChecksums) public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { - byte buf[] = new byte[TEST_LENGTH]; + byte[] buf = new byte[TEST_LENGTH]; if (usingChecksums) { try { reader.readFully(buf, 0, 10); @@ -508,7 +525,7 @@ public void setup(File blockFile, boolean usingChecksums) public void doTest(BlockReaderLocal reader, byte original[]) throws IOException { - byte buf[] = new byte[TEST_LENGTH]; + byte[] buf = new byte[TEST_LENGTH]; try { reader.readFully(buf, 0, 10); assertArrayRegionsEqual(original, 0, buf, 0, 10); @@ -845,4 +862,78 @@ public void testStatisticsForErasureCodingRead() throws IOException { } } } + + private static class TestBlockReaderFiveShortCircutCachesReads + extends BlockReaderLocalTest { + @Override + public void doTest(BlockReaderLocal reader, byte[] original, int shift) + throws IOException { + byte[] buf = new byte[TEST_LENGTH]; + reader.readFully(buf, 0, 512); + assertArrayRegionsEqual(original, shift, buf, 0, 512); + reader.readFully(buf, 512, 512); + assertArrayRegionsEqual(original, 512 + shift, buf, 512, 512); + reader.readFully(buf, 1024, 513); + assertArrayRegionsEqual(original, 1024 + shift, buf, 1024, 513); + reader.readFully(buf, 1537, 514); + assertArrayRegionsEqual(original, 1537 + shift, buf, 1537, 514); + // Readahead is always at least the size of one chunk in this test. 
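The assertion just below only checks a lower bound: whatever readahead value a test variant passes in (including 0), the reader reports an effective readahead of at least one checksum chunk. A small arithmetic sketch of chunk-aligned rounding that yields such a bound, offered as an illustration of the asserted property rather than the exact BlockReaderLocal computation:

  // Illustrative sketch only: chunk-aligned rounding that yields the lower
  // bound asserted just below; not the exact BlockReaderLocal computation.
  public class ReadaheadRoundingSketch {
    // Round a requested readahead up to a whole number of checksum chunks,
    // but never below a single chunk.
    static int effectiveReadahead(int requested, int bytesPerChecksum) {
      int chunks = Math.max(1, (requested + bytesPerChecksum - 1) / bytesPerChecksum);
      return chunks * bytesPerChecksum;
    }

    public static void main(String[] args) {
      int bytesPerChecksum = 512;  // BYTES_PER_CHECKSUM in the test above
      System.out.println(effectiveReadahead(0, bytesPerChecksum));    // 512
      System.out.println(effectiveReadahead(511, bytesPerChecksum));  // 512
      System.out.println(effectiveReadahead(513, bytesPerChecksum));  // 1024
    }
  }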
+ Assert.assertTrue(reader.getMaxReadaheadLength() >= + BlockReaderLocalTest.BYTES_PER_CHECKSUM); + } + } + + @Test + public void testBlockReaderFiveShortCircutCachesReads() throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT, + 5); + } + + @Test + public void testBlockReaderFiveShortCircutCachesReadsShortReadahead() + throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + true, BlockReaderLocalTest.BYTES_PER_CHECKSUM - 1, + 5); + } + + @Test + public void testBlockReaderFiveShortCircutCachesReadsNoChecksum() + throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + false, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT, + 5); + } + + @Test + public void testBlockReaderFiveShortCircutCachesReadsNoReadahead() + throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + true, 0, 5); + } + + @Test + public void testBlockReaderFiveShortCircutCachesReadsNoChecksumNoReadahead() + throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + false, 0, 5); + } + + @Test(expected = IllegalArgumentException.class) + public void testBlockReaderShortCircutCachesOutOfRangeBelow() + throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT, + 0); + } + + @Test(expected = IllegalArgumentException.class) + public void testBlockReaderShortCircutCachesOutOfRangeAbove() + throws IOException { + runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(), + true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT, + 555); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java index 6ee2898c0358d..78c908778c6c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.client.impl; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderIoProvider; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java index 013352394ccce..2d1a9fc73554d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.net; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java index 64ca3faf8ddb1..bec696ca6bb94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import static org.junit.Assert.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 07ef539c27c0a..467994142ea11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -35,7 +35,7 @@ import java.util.Iterator; import java.util.List; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; @@ -115,9 +115,9 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.thirdparty.protobuf.ByteString; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java index e3e862f34a5bb..1c5a5dd6b84a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java @@ -28,7 +28,7 @@ import java.util.List; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -40,8 +40,8 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.test.GenericTestUtils; public class MiniJournalCluster { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java index 9d95bfd551e10..539f21647a8f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java @@ -42,7 +42,7 @@ import 
org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public abstract class QJMTestUtil { public static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java index a8099cd46c1aa..df2f359409b54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java @@ -36,8 +36,8 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; public class TestEpochsAreUnique { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java index d64968651cab2..f2f46424cfd5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java @@ -40,7 +40,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; +import java.util.function.Supplier; public class TestIPCLoggerChannel { private static final Logger LOG = LoggerFactory.getLogger( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index 946358c7a61c4..8ead48f6f8a29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Rule; @@ -61,9 +61,9 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestQJMWithFaults { @@ -225,7 +225,7 @@ public void testRandomized() throws Exception { // If the user specifies a seed, then we should gather all the // IPC trace information so that debugging is easier. This makes // the test run about 25% slower otherwise. 
- GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL); + GenericTestUtils.setLogLevel(ProtobufRpcEngine2.LOG, Level.ALL); } else { seed = new Random().nextLong(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java index 97cf2f3c0684d..31b452ee56962 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java @@ -26,9 +26,9 @@ import org.apache.hadoop.util.FakeTimer; import org.junit.Test; -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; public class TestQuorumCall { @Test(timeout=10000) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index cd0216e2f26d0..251fadf20e1ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.After; @@ -69,7 +69,7 @@ import org.mockito.Mockito; import org.mockito.stubbing.Stubber; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Functional tests for QuorumJournalManager. 
@@ -87,7 +87,7 @@ public class TestQuorumJournalManager { private final List toClose = Lists.newLinkedList(); static { - GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL); + GenericTestUtils.setLogLevel(ProtobufRpcEngine2.LOG, Level.ALL); } @Rule diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java index d9c6ad233f452..2e2fc57ce8899 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java @@ -57,11 +57,11 @@ import org.mockito.Mockito; import org.mockito.stubbing.Stubber; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.apache.hadoop.thirdparty.protobuf.ByteString; import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeOp; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java index ecd6a2cd46457..dc16b665b5115 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import static org.apache.hadoop.hdfs.qjournal.client.SegmentRecoveryComparator.INSTANCE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java index 5891cec113640..6f0eece37bfbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java @@ -23,7 +23,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import com.google.common.primitives.Bytes; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index 4cc5968e6b58b..6e117b7687ac7 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.base.Charsets; -import com.google.common.primitives.Bytes; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java index c23604b998818..ffe20679e38b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java index 9e15d60a5a505..2a178a1547ec4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import com.google.common.primitives.Bytes; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 4c4c171d72a4e..c548b716f044d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -74,7 +74,7 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.TestWritable; import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -314,7 +314,7 @@ private static Server createMockDatanode(BlockTokenSecretManager sm, .getReplicaVisibleLength(any(), any()); RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class, - ProtobufRpcEngine.class); + 
ProtobufRpcEngine2.class); BlockingService service = ClientDatanodeProtocolService .newReflectiveBlockingService(mockDN); return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index d4ca119738e1e..f44bbb247b859 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1018,18 +1018,18 @@ private static int runBalancer(Collection namenodes, for(NameNodeConnector nnc : connectors) { final Balancer b = new Balancer(nnc, p, conf); final Result r = b.runOneIteration(); - r.print(iteration, System.out); + r.print(iteration, nnc, System.out); // clean all lists b.resetData(conf); - if (r.exitStatus == ExitStatus.IN_PROGRESS) { + if (r.getExitStatus() == ExitStatus.IN_PROGRESS) { done = false; - } else if (r.exitStatus != ExitStatus.SUCCESS) { + } else if (r.getExitStatus() != ExitStatus.SUCCESS) { //must be an error statue, return. - return r.exitStatus.getExitCode(); + return r.getExitStatus().getExitCode(); } else { if (iteration > 0) { - assertTrue(r.bytesAlreadyMoved > 0); + assertTrue(r.getBytesAlreadyMoved() > 0); } } } @@ -1608,9 +1608,9 @@ public void testMaxIterationTime() throws Exception { conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize); // limit the worker thread count of Balancer to have only 1 queue per DN conf.setInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY, 1); - // limit the bandwitdh to 1 packet per sec to emulate slow block moves + // limit the bandwidth to 4MB per sec to emulate slow block moves conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, - 64 * 1024); + 4 * 1024 * 1024); // set client socket timeout to have an IN_PROGRESS notification back from // the DataNode about the copy in every second. conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2000L); @@ -1641,31 +1641,22 @@ public void testMaxIterationTime() throws Exception { List connectors = Collections.emptyList(); try { BalancerParameters bParams = BalancerParameters.DEFAULT; + // set maxIdleIterations to 1 for NO_MOVE_PROGRESS to be + // reported when there is no block move connectors = NameNodeConnector.newNameNodeConnectors( DFSUtil.getInternalNsRpcUris(conf), Balancer.class.getSimpleName(), - Balancer.BALANCER_ID_PATH, conf, bParams.getMaxIdleIteration()); + Balancer.BALANCER_ID_PATH, conf, 1); for (NameNodeConnector nnc : connectors) { LOG.info("NNC to work on: " + nnc); Balancer b = new Balancer(nnc, bParams, conf); - long startTime = Time.monotonicNow(); Result r = b.runOneIteration(); - long runtime = Time.monotonicNow() - startTime; - assertEquals("We expect ExitStatus.IN_PROGRESS to be reported.", - ExitStatus.IN_PROGRESS, r.exitStatus); - // accept runtime if it is under 3.5 seconds, as we need to wait for - // IN_PROGRESS report from DN, and some spare to be able to finish. - // NOTE: This can be a source of flaky tests, if the box is busy, - // assertion here is based on the following: Balancer is already set - // up, iteration gets the blocks from the NN, and makes the decision - // to move 2 blocks. After that the PendingMoves are scheduled, and - // DataNode heartbeats in for the Balancer every second, iteration is - // two seconds long. 
This means that it will fail if the setup and the - // heartbeat from the DataNode takes more than 500ms, as the iteration - // should end at the 3rd second from start. As the number of - // operations seems to be pretty low, and all comm happens locally, I - // think the possibility of a failure due to node busyness is low. - assertTrue("Unexpected iteration runtime: " + runtime + "ms > 3.5s", - runtime < 3500); + // Since no block cannot be moved in 2 seconds (i.e., + // 4MB/s * 2s = 8MB < 10MB), NO_MOVE_PROGRESS will be reported. + // When a block move is not canceled in 2 seconds properly and then + // a block is moved unexpectedly, IN_PROGRESS will be reported. + assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.", + ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus()); + assertEquals(0, r.getBlocksMoved()); } } finally { for (NameNodeConnector nnc : connectors) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index c604315fb26f9..185df1246d367 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hdfs.server.balancer; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.times; @@ -31,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -44,7 +49,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.junit.Test; +import org.slf4j.LoggerFactory; /** * Test balancer with HA NameNodes @@ -106,6 +113,12 @@ void doTest(Configuration conf) throws Exception { TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 0); + boolean isRequestStandby = conf.getBoolean( + DFS_HA_ALLOW_STALE_READ_KEY, DFS_HA_ALLOW_STALE_READ_DEFAULT); + if (isRequestStandby) { + HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0), + cluster.getNameNode(1)); + } // start up an empty node with the same capacity and on the same rack long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity String newNodeRack = TestBalancer.RACK2; // new node's rack @@ -115,13 +128,54 @@ void doTest(Configuration conf) throws Exception { TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); Collection namenodes = DFSUtil.getInternalNsRpcUris(conf); + Collection nsIds = 
DFSUtilClient.getNameServiceIds(conf); assertEquals(1, namenodes.size()); - final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf); + final int r = Balancer.run(namenodes, nsIds, BalancerParameters.DEFAULT, + conf); assertEquals(ExitStatus.SUCCESS.getExitCode(), r); TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, BalancerParameters.DEFAULT); } + /** + * Test Balancer request Standby NameNode when enable this feature. + */ + @Test(timeout = 60000) + public void testBalancerRequestSBNWithHA() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFS_HA_ALLOW_STALE_READ_KEY, true); + conf.setLong(DFS_HA_TAILEDITS_PERIOD_KEY, 1); + //conf.setBoolean(DFS_HA_BALANCER_REQUEST_STANDBY_KEY, true); + TestBalancer.initConf(conf); + assertEquals(TEST_CAPACITIES.length, TEST_RACKS.length); + NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1"); + nn1Conf.setIpcPort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT); + Configuration copiedConf = new Configuration(conf); + cluster = new MiniDFSCluster.Builder(copiedConf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(TEST_CAPACITIES.length) + .racks(TEST_RACKS) + .simulatedCapacities(TEST_CAPACITIES) + .build(); + // Try capture NameNodeConnector log. + LogCapturer log =LogCapturer.captureLogs( + LoggerFactory.getLogger(NameNodeConnector.class)); + HATestUtil.setFailoverConfigurations(cluster, conf); + try { + cluster.waitActive(); + cluster.transitionToActive(0); + Thread.sleep(500); + client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf), + ClientProtocol.class).getProxy(); + doTest(conf); + // Check getBlocks request to Standby NameNode. + assertTrue(log.getOutput().contains( + "Request #getBlocks to Standby NameNode success.")); + } finally { + cluster.shutdown(); + } + } + /** * Test Balancer with ObserverNodes. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index cafde33394420..40afebff49980 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -146,15 +146,16 @@ private static ExtendedBlock[][] generateBlocks(Suite s, long size } /* wait for one heartbeat */ - static void wait(final ClientProtocol[] clients, + static void wait(final Suite suite, long expectedUsedSpace, long expectedTotalSpace) throws IOException { LOG.info("WAIT expectedUsedSpace=" + expectedUsedSpace + ", expectedTotalSpace=" + expectedTotalSpace); - for(int n = 0; n < clients.length; n++) { + suite.cluster.triggerHeartbeats(); + for(int n = 0; n < suite.clients.length; n++) { int i = 0; for(boolean done = false; !done; ) { - final long[] s = clients[n].getStats(); - done = s[0] == expectedTotalSpace && s[1] == expectedUsedSpace; + final long[] s = suite.clients[n].getStats(); + done = s[0] == expectedTotalSpace && s[1] >= expectedUsedSpace; if (!done) { sleep(100L); if (++i % 100 == 0) { @@ -172,7 +173,7 @@ static void runBalancer(Suite s, LOG.info("BALANCER 0: totalUsed=" + totalUsed + ", totalCapacity=" + totalCapacity + ", avg=" + avg); - wait(s.clients, totalUsed, totalCapacity); + wait(s, totalUsed, totalCapacity); LOG.info("BALANCER 1"); // get storage reports for relevant blockpools so that we can compare @@ -186,7 +187,7 @@ static void runBalancer(Suite s, Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r); LOG.info("BALANCER 2"); - wait(s.clients, totalUsed, totalCapacity); + wait(s, totalUsed, totalCapacity); LOG.info("BALANCER 3"); int i = 0; @@ -530,7 +531,7 @@ private void runTest(final int nNameNodes, String[] racks, LOG.info("RUN_TEST 2: create files"); // fill up the cluster to be 30% full - final long totalUsed = (totalCapacity * s.replication)*3/10; + final long totalUsed = totalCapacity * 3 / 10; final long size = (totalUsed/nNameNodes)/s.replication; for(int n = 0; n < nNameNodes; n++) { createFile(s, n, size); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index caac02c1bf9e7..6088722ef26cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopologyWithNodeGroup; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Assert; import org.junit.Test; @@ -361,4 +362,30 @@ public void testBalancerEndInNoMoveProgress() throws Exception { cluster.shutdown(); } } + + /** + * verify BlockPlacementPolicyNodeGroup uses NetworkTopologyWithNodeGroup. 
+ */ + + @Test + public void testBPPNodeGroup() throws Exception { + Configuration conf = createConf(); + conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, true); + long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY, CAPACITY}; + String[] racks = new String[] {RACK0, RACK0, RACK1, RACK1}; + String[] nodeGroups = + new String[] {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2}; + + int numOfDatanodes = capacities.length; + assertEquals(numOfDatanodes, racks.length); + assertEquals(numOfDatanodes, nodeGroups.length); + MiniDFSCluster.Builder builder = + new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length) + .racks(racks).simulatedCapacities(capacities); + MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups); + LambdaTestUtils.intercept(IllegalArgumentException.class, + "Configured cluster topology should be " + + "org.apache.hadoop.net.NetworkTopologyWithNodeGroup", + () -> new MiniDFSClusterWithNodeGroup(builder)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index 4d020d45a62b5..2b8804c12cc6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java @@ -36,7 +36,7 @@ import org.apache.hadoop.test.Whitebox; import org.junit.Assert; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; public class BlockManagerTestUtil { public static void setNodeReplicationLimit(final BlockManager blockManager, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 11ed5ba9a33d8..3b46702f4ed29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.LinkedListMultimap; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; @@ -49,9 +49,11 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import 
org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl; import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; import org.apache.hadoop.hdfs.server.namenode.CacheManager; @@ -1051,6 +1053,57 @@ public void testSafeModeIBRBeforeFirstFullBR() throws Exception { (ds) >= 0); } + @Test + public void testSafeModeWithProvidedStorageBR() throws Exception { + DatanodeDescriptor node0 = spy(nodes.get(0)); + DatanodeStorageInfo ds0 = node0.getStorageInfos()[0]; + node0.setAlive(true); + DatanodeDescriptor node1 = spy(nodes.get(1)); + DatanodeStorageInfo ds1 = node1.getStorageInfos()[0]; + node1.setAlive(true); + + String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT; + DatanodeStorage providedStorage = new DatanodeStorage( + providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED); + + // create block manager with provided storage enabled + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true); + conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS, + TestProvidedImpl.TestFileRegionBlockAliasMap.class, + BlockAliasMap.class); + BlockManager bmPs = new BlockManager(fsn, false, conf); + bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344"); + + // pretend to be in safemode + doReturn(true).when(fsn).isInStartupSafeMode(); + + // register new node + DatanodeRegistration nodeReg0 = + new DatanodeRegistration(node0, null, null, ""); + bmPs.getDatanodeManager().registerDatanode(nodeReg0); + bmPs.getDatanodeManager().addDatanode(node0); + DatanodeRegistration nodeReg1 = + new DatanodeRegistration(node1, null, null, ""); + bmPs.getDatanodeManager().registerDatanode(nodeReg1); + bmPs.getDatanodeManager().addDatanode(node1); + + // process reports of provided storage and disk storage + bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null); + bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()), + BlockListAsLongs.EMPTY, null); + bmPs.processReport(node1, providedStorage, BlockListAsLongs.EMPTY, null); + bmPs.processReport(node1, new DatanodeStorage(ds1.getStorageID()), + BlockListAsLongs.EMPTY, null); + + // The provided storage report should not affect disk storage report + DatanodeStorageInfo dsPs = + bmPs.getProvidedStorageMap().getProvidedStorageInfo(); + assertEquals(2, dsPs.getBlockReportCount()); + assertEquals(1, ds0.getBlockReportCount()); + assertEquals(1, ds1.getBlockReportCount()); + } + @Test public void testFullBR() throws Exception { doReturn(true).when(fsn).isRunning(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java index 964efb1634999..cf4ec6438a564 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java index 20a5fca7b7445..16279bb08837e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java @@ -21,9 +21,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS; -import com.google.common.base.Joiner; -import com.google.common.base.Supplier; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 0b39456bf8f8e..124db17e6c0df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -136,7 +136,7 @@ protected boolean isBlockTokenExpired(LocatedBlock lb) throws IOException { LocatedBlock[] internalBlocks = StripedBlockUtil.parseStripedBlockGroup (lsb, cellSize, dataBlocks, parityBlocks); for (LocatedBlock internalBlock : internalBlocks) { - if(super.isBlockTokenExpired(internalBlock)){ + if(internalBlock != null && super.isBlockTokenExpired(internalBlock)) { return true; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java index 9580bae3bab5f..eefc1d5034f4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import java.util.ArrayList; @@ -63,16 +64,16 @@ public void testBlocksCounter() throws Exception { DatanodeStorageInfo[] storages = dd.getStorageInfos(); assertTrue(storages.length > 0); // add first block - assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED); + assertEquals(AddBlockResult.ADDED, storages[0].addBlock(blk)); assertEquals(1, dd.numBlocks()); // remove a non-existent block assertFalse(BlocksMap.removeBlock(dd, blk1)); assertEquals(1, dd.numBlocks()); // add an existent block - assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED); + assertNotEquals(AddBlockResult.ADDED, 
storages[0].addBlock(blk)); assertEquals(1, dd.numBlocks()); // add second block - assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED); + assertEquals(AddBlockResult.ADDED, storages[0].addBlock(blk1)); assertEquals(2, dd.numBlocks()); // remove first block assertTrue(BlocksMap.removeBlock(dd, blk)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java index e8e6b949f16b6..cdce754e41fe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java @@ -28,11 +28,13 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Random; +import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -600,6 +602,245 @@ public void testGetBlockLocationConsiderLoad() assertEquals(locs[0].getIpAddr(), sortedLocs2[4].getIpAddr()); } + @Test + public void testGetBlockLocationConsiderLoadWithNodesOfSameDistance() + throws IOException, URISyntaxException { + Configuration conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY, true); + conf.setBoolean( + DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); + FSNamesystem fsn = Mockito.mock(FSNamesystem.class); + Mockito.when(fsn.hasWriteLock()).thenReturn(true); + URL shellScript = getClass() + .getResource("/" + Shell.appendScriptExtension("topology-script")); + Path resourcePath = Paths.get(shellScript.toURI()); + FileUtil.setExecutable(resourcePath.toFile(), true); + conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, + resourcePath.toString()); + DatanodeManager dm = mockDatanodeManager(fsn, conf); + + int totalDNs = 5; + // Register 5 datanodes and 2 nodes per rack with different load. + DatanodeInfo[] locs = new DatanodeInfo[totalDNs]; + String[] storageIDs = new String[totalDNs]; + for (int i = 0; i < totalDNs; i++) { + // Register new datanode. + String uuid = "UUID-" + i; + String ip = "IP-" + i / 2 + "-" + i; + DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class); + Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid); + Mockito.when(dr.getIpAddr()).thenReturn(ip); + dm.registerDatanode(dr); + + // Get location and storage information. + locs[i] = dm.getDatanode(uuid); + storageIDs[i] = "storageID-" + i; + + // Set load for datanodes. + locs[i].setXceiverCount(2); + } + + // Set node 0 decommissioned. + locs[0].setDecommissioned(); + + // Create LocatedBlock with above locations. + ExtendedBlock b = new ExtendedBlock("somePoolID", 1234); + LocatedBlock block = new LocatedBlock(b, locs); + List blocks = new ArrayList<>(); + blocks.add(block); + + // Test client not in cluster but same rack with locs[3]. 
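The test being assembled here exercises read-time sorting when several replicas sit at the same network distance and report the same xceiver count: over many iterations either of the equally close nodes may come back first. The sketch below is not the DatanodeManager implementation; it is a standalone illustration, with assumed field names, of ordering replicas by distance first and load second while leaving genuine ties in their existing order:

    import java.util.Arrays;
    import java.util.Comparator;

    // Hypothetical stand-in for a replica location; the field names are assumptions.
    class ReplicaLoc {
      final String ip;
      final int distance;       // network distance from the reader
      final int xceiverCount;   // current load on the datanode
      ReplicaLoc(String ip, int distance, int xceiverCount) {
        this.ip = ip;
        this.distance = distance;
        this.xceiverCount = xceiverCount;
      }
    }

    public class SortByDistanceThenLoad {
      public static void main(String[] args) {
        ReplicaLoc[] locs = {
            new ReplicaLoc("IP-1-3", 2, 2),
            new ReplicaLoc("IP-1-2", 2, 2),   // same distance and load as IP-1-3
            new ReplicaLoc("IP-0-0", 4, 2),   // farther away, sorts last
        };
        // Arrays.sort is stable, so the two equally close, equally loaded nodes
        // keep their relative order; either one can legitimately be read first.
        Arrays.sort(locs, Comparator.<ReplicaLoc>comparingInt(r -> r.distance)
            .thenComparingInt(r -> r.xceiverCount));
        for (ReplicaLoc r : locs) {
          System.out.println(r.ip);
        }
      }
    }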
+ // Number of iterations to do the test + int numIterations = 100; + + Set ipSet = new HashSet<>(); + final String targetIpNotInCluster = locs[3].getIpAddr() + "-client"; + for (int i = 0; i < numIterations; i++) { + dm.sortLocatedBlocks(targetIpNotInCluster, blocks); + DatanodeInfo[] sortedLocs = block.getLocations(); + assertEquals(totalDNs, sortedLocs.length); + if (!ipSet.contains(sortedLocs[0].getIpAddr())) { + ipSet.add(sortedLocs[0].getIpAddr()); + } + } + + // when the two nodes' distance and weight are same, they are same close. + assertEquals(2, ipSet.size()); + } + + @Test + public void testGetBlockLocationConsiderStorageType() + throws IOException, URISyntaxException { + Configuration conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, + true); + conf.setBoolean( + DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); + FSNamesystem fsn = Mockito.mock(FSNamesystem.class); + Mockito.when(fsn.hasWriteLock()).thenReturn(true); + URL shellScript = getClass() + .getResource("/" + Shell.appendScriptExtension("topology-script")); + Path resourcePath = Paths.get(shellScript.toURI()); + FileUtil.setExecutable(resourcePath.toFile(), true); + conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, + resourcePath.toString()); + DatanodeManager dm = mockDatanodeManager(fsn, conf); + + int totalDNs = 5; + // Register 5 datanodes and 2 nodes per rack with different load. + DatanodeInfo[] locs = new DatanodeInfo[totalDNs]; + String[] storageIDs = new String[totalDNs]; + List storageTypesList = + new ArrayList<>(Arrays.asList(StorageType.ARCHIVE, StorageType.DISK, + StorageType.SSD, StorageType.DEFAULT, StorageType.SSD)); + StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]); + + for (int i = 0; i < totalDNs; i++) { + // Register new datanode. + String uuid = "UUID-" + i; + String ip = "IP-" + i / 2 + "-" + i; + DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class); + Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid); + Mockito.when(dr.getIpAddr()).thenReturn(ip); + dm.registerDatanode(dr); + + // Get location and storage information. + locs[i] = dm.getDatanode(uuid); + storageIDs[i] = "storageID-" + i; + } + + // Set node 0 decommissioned. + locs[0].setDecommissioned(); + + // Create LocatedBlock with above locations. + ExtendedBlock b = new ExtendedBlock("somePoolID", 1234); + LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes); + List blocks = new ArrayList<>(); + blocks.add(block); + + // Test client located at locs[3] in cluster. + final String targetIpInCluster = locs[3].getIpAddr(); + dm.sortLocatedBlocks(targetIpInCluster, blocks); + DatanodeInfo[] sortedLocs = block.getLocations(); + assertEquals(totalDNs, sortedLocs.length); + // Ensure the local node is first. + assertEquals(targetIpInCluster, sortedLocs[0].getIpAddr()); + // Ensure choose fast storage type node when distance is same. + assertEquals(locs[3].getIpAddr(), sortedLocs[0].getIpAddr()); + assertEquals(locs[2].getIpAddr(), sortedLocs[1].getIpAddr()); + assertEquals(locs[4].getIpAddr(), sortedLocs[2].getIpAddr()); + assertEquals(locs[1].getIpAddr(), sortedLocs[3].getIpAddr()); + // Ensure the two decommissioned DNs were moved to the end. + assertThat(sortedLocs[4].getAdminState(), + is(DatanodeInfo.AdminStates.DECOMMISSIONED)); + assertEquals(locs[0].getIpAddr(), sortedLocs[4].getIpAddr()); + + // Test client not in cluster but same rack with locs[3]. 
+ final String targetIpNotInCluster = locs[3].getIpAddr() + "-client"; + dm.sortLocatedBlocks(targetIpNotInCluster, blocks); + DatanodeInfo[] sortedLocs2 = block.getLocations(); + assertEquals(totalDNs, sortedLocs2.length); + // Ensure the local rack is first and choose fast storage type node + // when distance is same. + assertEquals(locs[2].getIpAddr(), sortedLocs2[0].getIpAddr()); + assertEquals(locs[3].getIpAddr(), sortedLocs2[1].getIpAddr()); + assertEquals(locs[4].getIpAddr(), sortedLocs2[2].getIpAddr()); + assertEquals(locs[1].getIpAddr(), sortedLocs2[3].getIpAddr()); + // Ensure the two decommissioned DNs were moved to the end. + assertThat(sortedLocs[4].getAdminState(), + is(DatanodeInfo.AdminStates.DECOMMISSIONED)); + assertEquals(locs[0].getIpAddr(), sortedLocs2[4].getIpAddr()); + } + + @Test + public void testGetBlockLocationConsiderStorageTypeAndLoad() + throws IOException, URISyntaxException { + Configuration conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERSTORAGETYPE_KEY, + true); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY, true); + conf.setBoolean( + DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); + FSNamesystem fsn = Mockito.mock(FSNamesystem.class); + Mockito.when(fsn.hasWriteLock()).thenReturn(true); + URL shellScript = getClass() + .getResource("/" + Shell.appendScriptExtension("topology-script")); + Path resourcePath = Paths.get(shellScript.toURI()); + FileUtil.setExecutable(resourcePath.toFile(), true); + conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, + resourcePath.toString()); + DatanodeManager dm = mockDatanodeManager(fsn, conf); + + int totalDNs = 5; + // Register 5 datanodes and 2 nodes per rack with different load. + DatanodeInfo[] locs = new DatanodeInfo[totalDNs]; + String[] storageIDs = new String[totalDNs]; + List storageTypesList = + new ArrayList<>(Arrays.asList(StorageType.DISK, StorageType.DISK, + StorageType.DEFAULT, StorageType.SSD, StorageType.SSD)); + StorageType[] storageTypes = storageTypesList.toArray(new StorageType[0]); + + for (int i = 0; i < totalDNs; i++) { + // Register new datanode. + String uuid = "UUID-" + i; + String ip = "IP-" + i / 2 + "-" + i; + DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class); + Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid); + Mockito.when(dr.getIpAddr()).thenReturn(ip); + dm.registerDatanode(dr); + + // Get location and storage information. + locs[i] = dm.getDatanode(uuid); + storageIDs[i] = "storageID-" + i; + + // Set load for datanodes. + locs[i].setXceiverCount(i); + } + + // Set node 0 decommissioned. + locs[0].setDecommissioned(); + + // Create LocatedBlock with above locations. + ExtendedBlock b = new ExtendedBlock("somePoolID", 1234); + LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes); + List blocks = new ArrayList<>(); + blocks.add(block); + + // Test client located at locs[3] in cluster. + final String targetIpInCluster = locs[3].getIpAddr(); + dm.sortLocatedBlocks(targetIpInCluster, blocks); + DatanodeInfo[] sortedLocs = block.getLocations(); + assertEquals(totalDNs, sortedLocs.length); + // Ensure the local node is first. + assertEquals(targetIpInCluster, sortedLocs[0].getIpAddr()); + // Ensure choose the light weight node between light weight and fast storage + // type node when distance is same. 
+ assertEquals(locs[3].getIpAddr(), sortedLocs[0].getIpAddr()); + assertEquals(locs[2].getIpAddr(), sortedLocs[1].getIpAddr()); + assertEquals(locs[1].getIpAddr(), sortedLocs[2].getIpAddr()); + assertEquals(locs[4].getIpAddr(), sortedLocs[3].getIpAddr()); + // Ensure the two decommissioned DNs were moved to the end. + assertThat(sortedLocs[4].getAdminState(), + is(DatanodeInfo.AdminStates.DECOMMISSIONED)); + assertEquals(locs[0].getIpAddr(), sortedLocs[4].getIpAddr()); + + // Test client not in cluster but same rack with locs[3]. + final String targetIpNotInCluster = locs[3].getIpAddr() + "-client"; + dm.sortLocatedBlocks(targetIpNotInCluster, blocks); + DatanodeInfo[] sortedLocs2 = block.getLocations(); + assertEquals(totalDNs, sortedLocs2.length); + // Ensure the local rack is first and choose the light weight node between + // light weight and fast storage type node when distance is same. + assertEquals(locs[2].getIpAddr(), sortedLocs2[0].getIpAddr()); + assertEquals(locs[3].getIpAddr(), sortedLocs2[1].getIpAddr()); + assertEquals(locs[1].getIpAddr(), sortedLocs2[2].getIpAddr()); + assertEquals(locs[4].getIpAddr(), sortedLocs2[3].getIpAddr()); + // Ensure the two decommissioned DNs were moved to the end. + assertThat(sortedLocs[4].getAdminState(), + is(DatanodeInfo.AdminStates.DECOMMISSIONED)); + assertEquals(locs[0].getIpAddr(), sortedLocs2[4].getIpAddr()); + } + /** * Test whether removing a host from the includes list without adding it to * the excludes list will exclude it from data node reports. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java new file mode 100644 index 0000000000000..cb1ef2becb99f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED; + +/** + * In case of Erasure Coding the entire block group is marked corrupted, in + * case there is a failure during write, the replica when reported to + * Namenode marks the block group as corrupt, but since the file is complete + * and the replica has a lower gen stamp, the replica is marked to be + * deleted, but in case the replica is on stale storage (Namenode marks + * datanodes as stale in case of failover, etc), then the deletion of block + * is postponed and hence the block group isn't removed from the corrupt + * replica map. So, in case of striped blocks, if the replica is supposed to + * get deleted, explicitly remove the block group from corrupt-replica map. + */ +public class TestErasureCodingCorruption { + + @Test + public void testCorruptionDuringFailover() throws Exception { + Configuration conf = new Configuration(); + // Set removal of corrupt replicas immediately as false, to trigger this + // case. + conf.setBoolean(DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED, + false); + try (MiniDFSCluster cluster = new MiniDFSCluster + .Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(8) + .build()) { + cluster.transitionToActive(0); + cluster.waitActive(); + + DistributedFileSystem dfs = cluster.getFileSystem(0); + dfs.mkdirs(new Path("/dir")); + dfs.setErasureCodingPolicy(new Path("/dir"), "RS-6-3-1024k"); + + FSDataOutputStream out = dfs.create(new Path("/dir/file")); + // Write more than one stripe, so that data can get flushed to all + // datanodes. + for (int i = 0; i < 15 * 1024 * 1024; i++) { + out.write(i); + } + + // Stop one datanode, so as to trigger update pipeline. + MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0); + // Write some more data and close the file. + for (int i = 0; i < 7 * 1024 * 1024; i++) { + out.write(i); + } + out.close(); + + BlockManager bm = cluster.getNamesystem(0).getBlockManager(); + + // Transition to standby and then to active. + cluster.transitionToStandby(0); + cluster.transitionToActive(0); + + // Restart the stopped Datanode; this datanode would report a replica + // that failed during write. 
+ cluster.restartDataNode(dn); + GenericTestUtils + .waitFor(() -> bm.getCorruptECBlockGroups() == 0, 100, 10000); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java index 000c636716c98..e33e24fe28559 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java @@ -21,6 +21,7 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; +import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.Block; @@ -41,6 +42,7 @@ public class TestLowRedundancyBlockQueues { private final ErasureCodingPolicy ecPolicy; + private static AtomicLong mockINodeId = new AtomicLong(0); public TestLowRedundancyBlockQueues(ErasureCodingPolicy policy) { ecPolicy = policy; @@ -52,7 +54,15 @@ public static Collection policies() { } private BlockInfo genBlockInfo(long id) { - return new BlockInfoContiguous(new Block(id), (short) 3); + return genBlockInfo(id, false); + } + + private BlockInfo genBlockInfo(long id, boolean isCorruptBlock) { + BlockInfo bInfo = new BlockInfoContiguous(new Block(id), (short) 3); + if (!isCorruptBlock) { + bInfo.setBlockCollectionId(mockINodeId.incrementAndGet()); + } + return bInfo; } private BlockInfo genStripedBlockInfo(long id, long numBytes) { @@ -93,6 +103,41 @@ private void verifyBlockStats(LowRedundancyBlocks queues, queues.getHighestPriorityECBlockCount()); } + /** + * Tests that deleted blocks should not be returned by + * {@link LowRedundancyBlocks#chooseLowRedundancyBlocks(int, boolean)}. + * @throws Exception + */ + @Test + public void testDeletedBlocks() throws Exception { + int numBlocks = 5; + LowRedundancyBlocks queues = new LowRedundancyBlocks(); + // create 5 blockinfos. The first one is corrupt. + for (int ind = 0; ind < numBlocks; ind++) { + BlockInfo blockInfo = genBlockInfo(ind, ind == 0); + queues.add(blockInfo, 2, 0, 0, 3); + } + List> blocks; + // Get two blocks from the queue, but we should only get one because first + // block is deleted. 
+ blocks = queues.chooseLowRedundancyBlocks(2, false); + + assertEquals(1, blocks.get(2).size()); + assertEquals(1, blocks.get(2).get(0).getBlockId()); + + // Get the next blocks - should be ID 2 + blocks = queues.chooseLowRedundancyBlocks(1, false); + assertEquals(2, blocks.get(2).get(0).getBlockId()); + + // Get the next block, but also reset this time - should be ID 3 returned + blocks = queues.chooseLowRedundancyBlocks(1, true); + assertEquals(3, blocks.get(2).get(0).getBlockId()); + + // Get one more block and due to resetting the queue it will be block id 1 + blocks = queues.chooseLowRedundancyBlocks(1, false); + assertEquals(1, blocks.get(2).get(0).getBlockId()); + } + @Test public void testQueuePositionCanBeReset() throws Throwable { LowRedundancyBlocks queues = new LowRedundancyBlocks(); @@ -276,4 +321,15 @@ private void assertInLevel(LowRedundancyBlocks queues, } fail("Block " + block + " not found in level " + level); } + + @Test + public void testRemoveBlockInManyQueues() { + LowRedundancyBlocks neededReconstruction = new LowRedundancyBlocks(); + BlockInfo block = new BlockInfoContiguous(new Block(), (short)1024); + neededReconstruction.add(block, 2, 0, 1, 3); + neededReconstruction.add(block, 0, 0, 0, 3); + neededReconstruction.remove(block, LowRedundancyBlocks.LEVEL); + assertFalse("Should not contain the block.", + neededReconstruction.contains(block)); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java index f64767afe26bd..a73fd69f64d88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.util.ArrayList; import java.util.Collection; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index 22e91765fce1c..ebc073d53dc45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.junit.Test; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; public class TestPendingDataNodeMessages { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java index aad846822b549..c298761719e51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java @@ -40,7 +40,7 @@ import org.mockito.Mockito; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Test if we can correctly delay the deletion of blocks. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java index d52207ede377d..41303f101dd40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java @@ -31,7 +31,7 @@ import java.util.ArrayList; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index 9c48eeb8733ba..541db85f87857 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -46,8 +46,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test when RBW block is removed. 
Invalidation of the corrupted block happens diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 9c3a630f9535e..78629fe9903ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -38,6 +38,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; @@ -82,7 +83,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest { // The interval for marking a datanode as stale, private static final long staleInterval = DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT; - + private static AtomicLong mockINodeId = new AtomicLong(0); @Rule public ExpectedException exception = ExpectedException.none(); @@ -825,7 +826,15 @@ public void testRereplicate3() throws Exception { } private BlockInfo genBlockInfo(long id) { - return new BlockInfoContiguous(new Block(id), (short) 3); + return genBlockInfo(id, false); + } + + private BlockInfo genBlockInfo(long id, boolean isBlockCorrupted) { + BlockInfo bInfo = new BlockInfoContiguous(new Block(id), (short) 3); + if (!isBlockCorrupted) { + bInfo.setBlockCollectionId(mockINodeId.incrementAndGet()); + } + return bInfo; } /** @@ -848,7 +857,7 @@ public void testReplicationWithPriority() throws Exception { // Adding the blocks directly to normal priority neededReconstruction.add(genBlockInfo(ThreadLocalRandom.current(). - nextLong()), 2, 0, 0, 3); + nextLong(), true), 2, 0, 0, 3); } // Lets wait for the replication interval, to start process normal // priority blocks @@ -856,7 +865,7 @@ public void testReplicationWithPriority() throws Exception { // Adding the block directly to high priority list neededReconstruction.add(genBlockInfo(ThreadLocalRandom.current(). 
- nextLong()), 1, 0, 0, 3); + nextLong(), true), 1, 0, 0, 3); // Lets wait for the replication interval Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java index 63f37b99f4836..9fda398b43508 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java @@ -27,7 +27,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY; @@ -45,8 +45,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.FakeTimer; -import com.google.common.base.Supplier; -import com.google.common.collect.Maps; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.junit.Before; import org.junit.Rule; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java index bd78a50da9700..34bc616e540b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java @@ -243,6 +243,31 @@ public void doFilter(ServletRequest servletRequest, filter.destroy(); } + /** + * Test acceptable behavior to malformed requests + * Case: the request URI does not start with "/webhdfs/v1" + */ + @Test + public void testInvalidURI() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getMethod()).thenReturn("GET"); + Mockito.when(request.getRequestURI()).thenReturn("/InvalidURI"); + HttpServletResponse response = Mockito.mock(HttpServletResponse.class); + + Filter filter = new HostRestrictingAuthorizationFilter(); + HashMap configs = new HashMap() {}; + configs.put(AuthenticationFilter.AUTH_TYPE, "simple"); + FilterConfig fc = new DummyFilterConfig(configs); + + filter.init(fc); + filter.doFilter(request, response, + (servletRequest, servletResponse) -> {}); + Mockito.verify(response, Mockito.times(1)) + .sendError(Mockito.eq(HttpServletResponse.SC_NOT_FOUND), + Mockito.anyString()); + filter.destroy(); + } + private static class DummyFilterConfig implements FilterConfig { final Map map; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java index 1e42a8844f5c8..8971730707bfb 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java @@ -16,8 +16,8 @@ */ package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl; -import com.google.common.collect.Lists; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java index 534bc36c65879..91e07cfa4c8e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdfs.server.common.blockaliasmap.impl; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index e06be41773463..f500ae0fe49cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -37,7 +37,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.slf4j.Logger; @@ -320,7 +319,7 @@ public void blockReport_02() throws IOException { } } - waitTil(TimeUnit.SECONDS.toMillis(DN_RESCAN_EXTRA_WAIT)); + DataNodeTestUtils.runDirectoryScanner(dn0); // all blocks belong to the same file, hence same BP String poolId = cluster.getNamesystem().getBlockPoolId(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 453e04f8f1ea6..dba5a146f0c49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.test.GenericTestUtils; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; @@ -239,7 +239,7 @@ public static FsVolumeImpl getVolume(DataNode dn, File 
basePath) throws try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset() .getFsVolumeReferences()) { for (FsVolumeSpi vol : volumes) { - if (vol.getBaseURI().equals(basePath.toURI())) { + if (new File(vol.getBaseURI()).equals(basePath)) { return (FsVolumeImpl) vol; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java index 867d6c9215534..987b74df47b8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java index 97302b55a2ee5..30fee2fddd99b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java @@ -46,7 +46,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 13ad16979ce17..d911de69a964d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -40,7 +40,7 @@ import javax.management.ObjectName; import javax.management.StandardMBean; -import com.google.common.math.LongMath; +import org.apache.hadoop.thirdparty.com.google.common.math.LongMath; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; @@ -1507,7 +1507,7 @@ public StorageReport[] getStorageReports(String bpid) { } @Override - public List getFinalizedBlocks(String bpid) { + public List getSortedFinalizedBlocks(String bpid) { throw new UnsupportedOperationException(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index a305b81b6a1cc..9b47884aec8d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -89,9 +89,9 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class TestBPOfferService { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 6ddd880d8e9d6..ee522725e281e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -61,7 +61,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -117,7 +117,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * This tests if sync all replicas in block recovery works correctly. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java index a7d325e7eb83a..b34b7df0a92e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND; import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS; @@ -25,6 +27,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.Closeable; import java.io.File; @@ -36,9 +39,10 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.AppendTestUtil; @@ -94,9 +98,19 @@ private static class TestContext implements Closeable { TestContext(Configuration conf, int numNameServices) throws Exception { this.numNameServices = numNameServices; File basedir = new 
File(GenericTestUtils.getRandomizedTempPath()); + long volumeScannerTimeOutFromConf = + conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY, -1); + long expectedVScannerTimeOut = + volumeScannerTimeOutFromConf == -1 + ? MiniDFSCluster.DEFAULT_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC + : volumeScannerTimeOutFromConf; MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf, basedir). numDataNodes(1). storagesPerDatanode(1); + // verify that the builder was initialized to get the default + // configuration designated for Junit tests. + assertEquals(expectedVScannerTimeOut, + conf.getLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY, -1)); if (numNameServices > 1) { bld.nnTopology(MiniDFSNNTopology. simpleFederatedTopology(numNameServices)); @@ -974,4 +988,170 @@ public Boolean get() { info.blocksScanned = 0; } } + + @Test + public void testSkipRecentAccessFile() throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(DFS_BLOCK_SCANNER_SKIP_RECENT_ACCESSED, true); + conf.setLong(INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS, 2000L); + conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER, + TestScanResultHandler.class.getName()); + final TestContext ctx = new TestContext(conf, 1); + final int totalBlocks = 5; + ctx.createFiles(0, totalBlocks, 4096); + + final TestScanResultHandler.Info info = + TestScanResultHandler.getInfo(ctx.volumes.get(0)); + synchronized (info) { + info.shouldRun = true; + info.notify(); + } + try { + GenericTestUtils.waitFor(() -> { + synchronized (info) { + return info.blocksScanned > 0; + } + }, 10, 500); + fail("Scan nothing for all files are accessed in last period."); + } catch (TimeoutException e) { + LOG.debug("Timeout for all files are accessed in last period."); + } + synchronized (info) { + info.shouldRun = false; + info.notify(); + } + assertEquals("Should not scan block accessed in last period", + 0, info.blocksScanned); + ctx.close(); + } + + /** + * Test a DN does not wait for the VolumeScanners to finish before shutting + * down. + * + * @throws Exception + */ + @Test(timeout=120000) + public void testFastDatanodeShutdown() throws Exception { + // set the joinTimeOut to a value smaller than the completion time of the + // VolumeScanner. + testDatanodeShutDown(50L, 1000L, true); + } + + /** + * Test a DN waits for the VolumeScanners to finish before shutting down. 
+ * + * @throws Exception + */ + @Test(timeout=120000) + public void testSlowDatanodeShutdown() throws Exception { + // Set the joinTimeOut to a value larger than the completion time of the + // volume scanner + testDatanodeShutDown(TimeUnit.MINUTES.toMillis(5), 1000L, + false); + } + + private void testDatanodeShutDown(final long joinTimeOutMS, + final long delayMS, boolean isFastShutdown) throws Exception { + VolumeScannerCBInjector prevVolumeScannerCBInject = + VolumeScannerCBInjector.get(); + try { + DelayVolumeScannerResponseToInterrupt injectDelay = + new DelayVolumeScannerResponseToInterrupt(delayMS); + VolumeScannerCBInjector.set(injectDelay); + Configuration conf = new Configuration(); + conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L); + conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER, + TestScanResultHandler.class.getName()); + conf.setLong(INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, 0L); + conf.setLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY, + joinTimeOutMS); + final TestContext ctx = new TestContext(conf, 1); + final int numExpectedBlocks = 10; + ctx.createFiles(0, numExpectedBlocks, 1); + final TestScanResultHandler.Info info = + TestScanResultHandler.getInfo(ctx.volumes.get(0)); + synchronized (info) { + info.sem = new Semaphore(5); + info.shouldRun = true; + info.notify(); + } + // make sure that the scanners are doing progress + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + synchronized (info) { + return info.blocksScanned >= 1; + } + } + }, 3, 30000); + // mark the time where the + long startShutdownTime = Time.monotonicNow(); + ctx.datanode.shutdown(); + long endShutdownTime = Time.monotonicNow(); + long totalTimeShutdown = endShutdownTime - startShutdownTime; + + if (isFastShutdown) { + assertTrue("total shutdown time of DN must be smaller than " + + "VolumeScanner Response time: " + totalTimeShutdown, + totalTimeShutdown < delayMS + && totalTimeShutdown >= joinTimeOutMS); + // wait for scanners to terminate before we move to the next test. + injectDelay.waitForScanners(); + return; + } + assertTrue("total shutdown time of DN must be larger than " + + "VolumeScanner Response time: " + totalTimeShutdown, + totalTimeShutdown >= delayMS + && totalTimeShutdown < joinTimeOutMS); + } finally { + // restore the VolumeScanner callback injector. + VolumeScannerCBInjector.set(prevVolumeScannerCBInject); + } + } + + private static class DelayVolumeScannerResponseToInterrupt extends + VolumeScannerCBInjector { + final private long delayAmountNS; + final private Set scannersToShutDown; + + DelayVolumeScannerResponseToInterrupt(long delayMS) { + delayAmountNS = + TimeUnit.NANOSECONDS.convert(delayMS, TimeUnit.MILLISECONDS); + scannersToShutDown = ConcurrentHashMap.newKeySet(); + } + + @Override + public void preSavingBlockIteratorTask(VolumeScanner volumeScanner) { + long remainingTimeNS = delayAmountNS; + // busy delay without sleep(). + long startTime = Time.monotonicNowNanos(); + long endTime = startTime + remainingTimeNS; + long currTime, waitTime = 0; + while ((currTime = Time.monotonicNowNanos()) < endTime) { + // empty loop. No need to sleep because the thread could be in an + // interrupt mode. 
+ waitTime = currTime - startTime; + } + LOG.info("VolumeScanner {} finished delayed Task after {}", + volumeScanner.toString(), + TimeUnit.NANOSECONDS.convert(waitTime, TimeUnit.MILLISECONDS)); + } + + @Override + public void shutdownCallBack(VolumeScanner volumeScanner) { + scannersToShutDown.add(volumeScanner); + } + + @Override + public void terminationCallBack(VolumeScanner volumeScanner) { + scannersToShutDown.remove(volumeScanner); + } + + public void waitForScanners() throws TimeoutException, + InterruptedException { + GenericTestUtils.waitFor( + () -> scannersToShutDown.isEmpty(), 10, 120000); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java index ccd146d0f6bdf..a71dbdbd30c5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index d98047f47fa4e..7efae8808c55b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationException; import org.apache.hadoop.fs.BlockLocation; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java index dcfbf027370e9..283b8bd6d2570 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java @@ -71,7 +71,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Test suite covering lifeline protocol handling in the DataNode. 
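The import swap repeated throughout these test files, com.google.common.base.Supplier replaced by java.util.function.Supplier, is source-compatible at the call sites because both interfaces expose a single get() method. A minimal standalone sketch of that point, assuming the GenericTestUtils.waitFor(Supplier<Boolean>, int, int) signature already used elsewhere in this patch; the class name and the AtomicBoolean flag below are illustrative only and not part of the change:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

import org.apache.hadoop.test.GenericTestUtils;

public class SupplierMigrationSketch {
  public static void main(String[] args) throws Exception {
    AtomicBoolean done = new AtomicBoolean(false);
    Thread worker = new Thread(() -> done.set(true));
    worker.start();

    // Lambda call sites compile identically against the JDK interface,
    // so only the import line changes in the tests touched above.
    GenericTestUtils.waitFor(() -> done.get(), 10, 10000);

    // Anonymous-class call sites (the style still used in
    // testDatanodeShutDown) also keep working, since both the Guava and
    // the JDK interface declare a single T get() method.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return done.get();
      }
    }, 10, 10000);

    worker.join();
  }
}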
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java index c0d3a4944bd4f..73f664b98036e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java @@ -28,7 +28,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index b4e26405ca0ba..85664ebc954d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -27,14 +27,15 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; import java.lang.management.ManagementFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.List; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import net.jcip.annotations.NotThreadSafe; import org.apache.hadoop.hdfs.MiniDFSNNTopology; @@ -374,6 +375,57 @@ public void testDatanodeActiveXceiversCount() throws Exception { } } + @Test + public void testDataNodeMXBeanActiveThreadCount() throws Exception { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + FileSystem fs = cluster.getFileSystem(); + Path p = new Path("/testfile"); + + try { + List datanodes = cluster.getDataNodes(); + assertEquals(1, datanodes.size()); + DataNode datanode = datanodes.get(0); + + // create a xceiver thread for write + FSDataOutputStream os = fs.create(p); + for (int i = 0; i < 1024; i++) { + os.write("testdatastr".getBytes()); + } + os.hsync(); + // create a xceiver thread for read + InputStream is = fs.open(p); + is.read(new byte[16], 0, 4); + + int threadCount = datanode.threadGroup.activeCount(); + assertTrue(threadCount > 0); + Thread[] threads = new Thread[threadCount]; + datanode.threadGroup.enumerate(threads); + int xceiverCount = 0; + int responderCount = 0; + int recoveryWorkerCount = 0; + for (Thread t : threads) { + if (t.getName().contains("DataXceiver for client")) { + xceiverCount++; + } else if (t.getName().contains("PacketResponder")) { + responderCount++; + } + } + assertEquals(2, xceiverCount); + assertEquals(1, responderCount); + assertEquals(0, recoveryWorkerCount); //not easy to produce + assertEquals(xceiverCount, datanode.getXceiverCount()); + assertEquals(xceiverCount + responderCount + recoveryWorkerCount, + datanode.getActiveTransferThreadCount()); + + is.close(); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + @Test public 
void testDNShouldNotDeleteBlockONTooManyOpenFiles() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 4c443afc19ee1..8443c36835489 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -54,7 +54,7 @@ import org.junit.Test; import org.junit.rules.Timeout; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Test periodic logging of DataNode metrics. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java index 3a71d6bf3b564..5167042f9be35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java @@ -30,7 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java index 3caf24d83fc80..41fb41f48005a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java @@ -17,17 +17,24 @@ */ package org.apache.hadoop.hdfs.server.datanode; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodePeerMetrics; import org.apache.hadoop.metrics2.lib.MetricsTestHelper; +import org.apache.hadoop.metrics2.lib.MutableRollingAverages; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.conf.Configuration; import org.junit.Test; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PEER_METRICS_MIN_OUTLIER_DETECTION_SAMPLES_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY; import static org.hamcrest.CoreMatchers.containsString; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; /** @@ -43,7 +50,7 @@ public void testGetSendPacketDownstreamAvgInfo() throws Exception { final int numOpsPerIteration = 1000; final Configuration conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); + conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); final DataNodePeerMetrics peerMetrics = 
DataNodePeerMetrics.create( "Sample-DataNode", conf); @@ -80,6 +87,59 @@ public void testGetSendPacketDownstreamAvgInfo() throws Exception { } } + @Test(timeout = 30000) + public void testRemoveStaleRecord() throws Exception { + final int numWindows = 5; + final long scheduleInterval = 1000; + final int iterations = 3; + final int numSamples = 100; + + final Configuration conf = new HdfsConfiguration(); + conf.setLong(DFS_DATANODE_PEER_METRICS_MIN_OUTLIER_DETECTION_SAMPLES_KEY, + numSamples); + conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); + + final DataNodePeerMetrics peerMetrics = + DataNodePeerMetrics.create("Sample-DataNode", conf); + MutableRollingAverages rollingAverages = + peerMetrics.getSendPacketDownstreamRollingAverages(); + rollingAverages.setRecordValidityMs(numWindows * scheduleInterval); + MetricsTestHelper.replaceRollingAveragesScheduler(rollingAverages, + numWindows, scheduleInterval, TimeUnit.MILLISECONDS); + + List peerAddrList = new ArrayList<>(); + for (int i = 1; i <= iterations; i++) { + peerAddrList.add(genPeerAddress()); + } + for (String peerAddr : peerAddrList) { + for (int j = 1; j <= numSamples; j++) { + /* simulate to get latency of 1 to 1000 ms */ + final long latency = ThreadLocalRandom.current().nextLong(1, 1000); + peerMetrics.addSendPacketDownstream(peerAddr, latency); + } + } + + GenericTestUtils.waitFor( + () -> rollingAverages.getStats(numSamples).size() > 0, 500, 5000); + assertEquals(3, rollingAverages.getStats(numSamples).size()); + /* wait for stale report to be removed */ + GenericTestUtils.waitFor( + () -> rollingAverages.getStats(numSamples).isEmpty(), 500, 10000); + assertEquals(0, rollingAverages.getStats(numSamples).size()); + + /* dn can report peer metrics normally when it added back to cluster */ + for (String peerAddr : peerAddrList) { + for (int j = 1; j <= numSamples; j++) { + /* simulate to get latency of 1 to 1000 ms */ + final long latency = ThreadLocalRandom.current().nextLong(1, 1000); + peerMetrics.addSendPacketDownstream(peerAddr, latency); + } + } + GenericTestUtils.waitFor( + () -> rollingAverages.getStats(numSamples).size() > 0, 500, 10000); + assertEquals(3, rollingAverages.getStats(numSamples).size()); + } + /** * Simulates to generate different peer addresses, e.g. [84.125.113.65:9801]. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index c6f4f01d89af0..e15baacac843f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -91,7 +91,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Fine-grain testing of block files and locations after volume failure. 
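The new testDataNodeMXBeanActiveThreadCount above classifies DataNode transfer threads purely by substring-matching their names. The same counting idiom in isolation, on plain JDK threads; the thread names and class name here are invented for illustration, and only the DataXceiver/PacketResponder naming in the test itself comes from the DataNode:

import java.util.concurrent.CountDownLatch;

public class ThreadNameCountSketch {

  public static void main(String[] args) throws Exception {
    CountDownLatch stop = new CountDownLatch(1);
    // Spawn workers whose names mimic the patterns the test greps for;
    // in the real test the DataNode itself names these threads.
    startWorker(stop, "DataXceiver for client sketch-1");
    startWorker(stop, "DataXceiver for client sketch-2");
    startWorker(stop, "PacketResponder sketch");

    ThreadGroup group = Thread.currentThread().getThreadGroup();
    Thread[] threads = new Thread[group.activeCount()];
    group.enumerate(threads);

    int xceiverCount = 0;
    int responderCount = 0;
    for (Thread t : threads) {
      if (t == null) {
        continue; // enumerate() may leave trailing null slots
      }
      if (t.getName().contains("DataXceiver for client")) {
        xceiverCount++;
      } else if (t.getName().contains("PacketResponder")) {
        responderCount++;
      }
    }
    System.out.println("xceivers=" + xceiverCount
        + ", responders=" + responderCount);
    stop.countDown();
  }

  private static void startWorker(CountDownLatch stop, String name) {
    Thread t = new Thread(() -> {
      try {
        stop.await();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }, name);
    t.setDaemon(true);
    t.start();
  }
}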
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java index 93f9f44b04484..905cc2a1ecc19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java @@ -29,7 +29,7 @@ import java.util.ArrayList; import java.util.concurrent.ThreadLocalRandom; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index bffdaae369771..fc11b9ae5bb23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -33,7 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.Assert; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java index 0a589a9ea7a50..64c5a5b93aa48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @@ -28,7 +28,7 @@ import java.net.Socket; import java.util.concurrent.TimeUnit; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java index 9d5cde3e744da..106c515d49c08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java @@ -51,7 +51,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; /** * Test proper {@link BlockManager} replication counting for {@link DatanodeStorage}s diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index f8594ca7ebace..0899dc6561893 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -26,14 +26,16 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf; import org.apache.hadoop.hdfs.MiniDFSNNTopology.NSConf; import org.junit.Test; -import com.google.common.base.Joiner; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Tests datanode refresh namenode list functionality. @@ -93,4 +95,36 @@ public void testRefreshNamenodes() throws IOException { } } } + + @Test(timeout=10000) + public void testRefreshNameNodeDeadLock() throws Exception { + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + + DataNodeFaultInjector.set(new DataNodeFaultInjector() { + @Override + public void delayWhenOfferServiceHoldLock() { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + }); + + DataNode dn = cluster.getDataNodes().get(0); + Configuration dnConf = dn.getConf(); + dnConf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1"); + dnConf.set(DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY + ".ns1", + "mock:8022"); + dn.refreshNamenodes(dnConf); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java index 9b0636fada19e..fd05a4702fef1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.hdfs.HdfsConfiguration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java index b2ecd83baecc0..318f8b2734150 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.base.Supplier; -import com.google.common.util.concurrent.ListenableFuture; +import java.util.function.Supplier; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.FakeTimer; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java index 926747d457315..dac55506eefb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode.checker; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.util.FakeTimer; import org.junit.Before; import org.junit.Rule; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingTestHelper.java new file mode 100644 index 0000000000000..da571055109f9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingTestHelper.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.datanode.erasurecode; + +import org.apache.hadoop.io.ByteBufferPool; + +public final class ErasureCodingTestHelper { + + private ErasureCodingTestHelper() { } + + public static ByteBufferPool getBufferPool() { + return StripedReconstructor.getBufferPool(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java index caaa89c25e06c..13b740e08cf3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java @@ -90,7 +90,7 @@ public Map getVolumeInfoMap() { } @Override - public List getFinalizedBlocks(String bpid) { + public List getSortedFinalizedBlocks(String bpid) { return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java index e383e07246bab..cf35ba99fd085 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.FileExistsException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java index 35f70229a6574..cb26533229c32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java @@ -45,7 +45,7 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java index ea31612dfbe45..9575028944997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java @@ -62,8 +62,8 @@ import org.junit.Test; import org.slf4j.event.Level; 
-import com.google.common.base.Supplier; -import com.google.common.primitives.Ints; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java index ae42bd8994e51..8b1a6c0814ca8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java @@ -91,8 +91,8 @@ import org.junit.Test; import org.slf4j.event.Level; -import com.google.common.base.Supplier; -import com.google.common.primitives.Ints; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 29e533c32cea1..6489cd079adb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; -import java.io.FileInputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Paths; @@ -80,6 +79,7 @@ import java.io.Writer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.HashSet; import java.util.List; @@ -106,6 +106,8 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import org.slf4j.Logger; @@ -268,16 +270,24 @@ public void testAddVolumeWithSameStorageUuid() throws IOException { } @Test(timeout = 30000) - public void testRemoveVolumes() throws IOException { + public void testRemoveOneVolume() throws IOException { // Feed FsDataset with block metadata. 
- final int NUM_BLOCKS = 100; - for (int i = 0; i < NUM_BLOCKS; i++) { - String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length]; + final int numBlocks = 100; + for (int i = 0; i < numBlocks; i++) { + String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length]; ExtendedBlock eb = new ExtendedBlock(bpid, i); - try (ReplicaHandler replica = - dataset.createRbw(StorageType.DEFAULT, null, eb, false)) { + ReplicaHandler replica = null; + try { + replica = dataset.createRbw(StorageType.DEFAULT, null, eb, + false); + } finally { + if (replica != null) { + replica.close(); + } } } + + // Remove one volume final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(","); final String volumePathToRemove = dataDirs[0]; @@ -300,6 +310,11 @@ public void testRemoveVolumes() throws IOException { assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size()); + // DataNode.notifyNamenodeDeletedBlock() should be called 50 times + // as we deleted one volume that has 50 blocks + verify(datanode, times(50)) + .notifyNamenodeDeletedBlock(any(), any()); + try { dataset.asyncDiskService.execute(volumeToRemove, new Runnable() { @@ -317,10 +332,81 @@ public void run() {} totalNumReplicas += dataset.volumeMap.size(bpid); } assertEquals("The replica infos on this volume has been removed from the " - + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, + + "volumeMap.", numBlocks / NUM_INIT_VOLUMES, totalNumReplicas); } + @Test(timeout = 30000) + public void testRemoveTwoVolumes() throws IOException { + // Feed FsDataset with block metadata. + final int numBlocks = 100; + for (int i = 0; i < numBlocks; i++) { + String bpid = BLOCK_POOL_IDS[numBlocks % BLOCK_POOL_IDS.length]; + ExtendedBlock eb = new ExtendedBlock(bpid, i); + ReplicaHandler replica = null; + try { + replica = dataset.createRbw(StorageType.DEFAULT, null, eb, + false); + } finally { + if (replica != null) { + replica.close(); + } + } + } + + // Remove two volumes + final String[] dataDirs = + conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(","); + Set volumesToRemove = new HashSet<>(); + volumesToRemove.add(StorageLocation.parse(dataDirs[0])); + volumesToRemove.add(StorageLocation.parse(dataDirs[1])); + + FsVolumeReferences volReferences = dataset.getFsVolumeReferences(); + Set volumes = new HashSet<>(); + for (FsVolumeSpi vol: volReferences) { + for (StorageLocation volume : volumesToRemove) { + if (vol.getStorageLocation().equals(volume)) { + volumes.add((FsVolumeImpl) vol); + } + } + } + assertEquals(2, volumes.size()); + volReferences.close(); + + dataset.removeVolumes(volumesToRemove, true); + int expectedNumVolumes = dataDirs.length - 2; + assertEquals("The volume has been removed from the volumeList.", + expectedNumVolumes, getNumVolumes()); + assertEquals("The volume has been removed from the storageMap.", + expectedNumVolumes, dataset.storageMap.size()); + + // DataNode.notifyNamenodeDeletedBlock() should be called 100 times + // as we deleted 2 volumes that have 100 blocks totally + verify(datanode, times(100)) + .notifyNamenodeDeletedBlock(any(), any()); + + for (FsVolumeImpl volume : volumes) { + try { + dataset.asyncDiskService.execute(volume, + new Runnable() { + @Override + public void run() {} + }); + fail("Expect RuntimeException: the volume has been removed from the " + + "AsyncDiskService."); + } catch (RuntimeException e) { + GenericTestUtils.assertExceptionContains("Cannot find volume", e); + } + } + + int totalNumReplicas = 0; + for (String bpid 
: dataset.volumeMap.getBlockPoolList()) { + totalNumReplicas += dataset.volumeMap.size(bpid); + } + assertEquals("The replica infos on this volume has been removed from the " + + "volumeMap.", 0, totalNumReplicas); + } + @Test(timeout = 5000) public void testRemoveNewlyAddedVolume() throws IOException { final int numExistingVolumes = getNumVolumes(); @@ -387,6 +473,41 @@ public void testAddVolumeFailureReleasesInUseLock() throws IOException { FsDatasetTestUtil.assertFileLockReleased(badDir.toString()); } + + @Test + /** + * This test is here primarily to catch any case where the datanode replica + * map structure is changed to a new structure which is not sorted and hence + * reading the blocks from it directly would not be sorted. + */ + public void testSortedFinalizedBlocksAreSorted() throws IOException { + this.conf = new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + try { + cluster.waitActive(); + DataNode dn = cluster.getDataNodes().get(0); + + FsDatasetSpi ds = DataNodeTestUtils.getFSDataset(dn); + ds.addBlockPool(BLOCKPOOL, conf); + + // Load 1000 blocks with random blockIDs + for (int i=0; i<=1000; i++) { + ExtendedBlock eb = new ExtendedBlock( + BLOCKPOOL, new Random().nextInt(), 1000, 1000 + i); + cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb); + } + // Get the sorted blocks and validate the arrayList is sorted + List replicaList = ds.getSortedFinalizedBlocks(BLOCKPOOL); + for (int i=0; i 0) { + // Not sorted so fail the test + fail("ArrayList is not sorted, and it should be"); + } + } + } finally { + cluster.shutdown(); + } + } @Test public void testDeletingBlocks() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java index 867d3ab042cee..ab74b8db28c23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java index d154b1fcad603..2d54c480461c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java index c4c5aa0b9aaf5..d3232c8362949 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java @@ -62,8 +62,8 @@ import org.junit.Test; import org.slf4j.event.Level; -import com.google.common.base.Supplier; -import com.google.common.primitives.Ints; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; /** * Tests HDFS persistent memory cache by PmemMappableBlockLoader. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java index 7043227fe7753..f7aa4c34123d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java @@ -16,7 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java index 559828bda8339..9d5bfd7b2e80e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java index c9e3a1dfed837..3eb5df4e3ab5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.metrics; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.metrics2.lib.MetricsTestHelper; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java index f06a87ae8f354..bd0cf3e363493 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hdfs.server.datanode.metrics; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java index 70618b12b8a7a..35790f9424116 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java @@ -51,8 +51,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestDataNodeUGIProvider { private final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java index fef9c631a092e..2a496fb0ec222 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index 931bdb4b74e43..a0e4e4db3f04a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -16,8 +16,8 @@ */ package org.apache.hadoop.hdfs.server.diskbalancer; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import java.util.function.Supplier; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationException; @@ -757,7 +757,7 @@ public Boolean get() { } }, 1000, 100000); - assertTrue("Disk balancer operation hit max errors!", errorCount.get() < + assertTrue("Disk balancer operation hit max errors!", errorCount.get() <= DFSConfigKeys.DFS_DISK_BALANCER_MAX_DISK_ERRORS_DEFAULT); createWorkPlanLatch.await(); removeDiskLatch.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java index f6a5d9c469276..7f6cd5e156ab6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java @@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.diskbalancer; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import java.util.function.Supplier; import org.apache.commons.codec.digest.DigestUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java index e662fa1435265..1344ee0c88162 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java @@ -71,7 +71,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests various CLI commands of DiskBalancer. 
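testSortedFinalizedBlocksAreSorted above asserts ordering with a pairwise compareTo scan over the list returned by getSortedFinalizedBlocks(). The same check as a standalone helper, shown on plain Longs so no MiniDFSCluster is needed; the class and method names are ours for illustration, not part of the patch:

import java.util.Arrays;
import java.util.List;

public final class SortedCheckSketch {

  private SortedCheckSketch() {
  }

  /**
   * Returns true iff every element compares less than or equal to its
   * successor, i.e. the list is in non-decreasing order -- the property
   * the new test asserts on the finalized replicas.
   */
  static <T extends Comparable<T>> boolean isSorted(List<T> list) {
    for (int i = 0; i + 1 < list.size(); i++) {
      if (list.get(i).compareTo(list.get(i + 1)) > 0) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isSorted(Arrays.asList(1L, 5L, 9L)));  // true
    System.out.println(isSorted(Arrays.asList(9L, 1L, 5L)));  // false
  }
}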
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 200178d262caa..5393b905dc3f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -95,8 +95,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; -import com.google.common.collect.Maps; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class TestMover { private static final Logger LOG = LoggerFactory.getLogger(TestMover.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index 1b3faa0eee9d6..fec04be65b24a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -65,8 +65,8 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Test the data migration tool (for Archival Storage) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index fd5064863772d..8a2c0e206fe88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -57,8 +57,8 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests NameNode interaction for all ACL modification APIs. 
This test suite diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 01dc31ba8c225..2de9a4e5d691d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -63,13 +63,13 @@ import org.apache.hadoop.io.IOUtils; import org.mockito.Mockito; -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; /** * Utility functions for testing fsimage storage. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index b5f75730b4811..019531c4de736 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -59,8 +59,8 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Tests NameNode interaction for all XAttr APIs. 
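FSXAttrBaseTest, whose import hunk closes the section above, drives the NameNode XAttr APIs through the public FileSystem interface. For orientation, a hedged sketch of the calls under test (the path and attribute name are illustrative, and a working HDFS client Configuration is assumed):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class XAttrRoundTripSketch {
      static String roundTrip(Configuration conf) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        Path dir = new Path("/tmp/xattr-demo");   // illustrative path
        fs.mkdirs(dir);
        // User-namespace extended attributes are stored and returned as raw bytes.
        fs.setXAttr(dir, "user.owner-team",
            "hdfs".getBytes(StandardCharsets.UTF_8));
        byte[] value = fs.getXAttr(dir, "user.owner-team");
        return new String(value, StandardCharsets.UTF_8);
      }
    }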
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index f501673ef8361..245f5be39c5ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -27,7 +27,7 @@ import java.util.EnumSet; import java.util.List; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index 324f4fbe952f9..a584da1109f18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -56,6 +56,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.Whitebox; import org.mockito.Mockito; +import static org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.FSIMAGE_ATTRIBUTE_KEY; /** * This is a utility class to expose NameNode functionality for unit tests. @@ -129,6 +130,17 @@ public static Server getRpcServer(NameNode namenode) { return ((NameNodeRpcServer)namenode.getRpcServer()).clientRpcServer; } + /** + * Sets the FSImage used in the NameNodeHttpServer and returns the old value. 
+ */ + public static FSImage getAndSetFSImageInHttpServer(NameNode namenode, + FSImage fsImage) { + FSImage previous = (FSImage) namenode.httpServer.getHttpServer() + .getAttribute(FSIMAGE_ATTRIBUTE_KEY); + namenode.httpServer.setFSImage(fsImage); + return previous; + } + public static DelegationTokenSecretManager getDtSecretManager( final FSNamesystem ns) { return ns.getDelegationTokenSecretManager(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java index 33f9081c665fa..f45728f167d46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests that the configuration flag that controls support for ACLs is off by diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java index f66bf2a569209..a4df00ed06c20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java @@ -26,8 +26,8 @@ import java.util.List; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.Test; import org.apache.hadoop.fs.permission.AclEntry; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java index ed3d41aeb8735..c03adf40bfff0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java @@ -40,7 +40,7 @@ import org.junit.rules.Timeout; import org.mockito.Mockito; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java index eb3f7ab718420..3eb311450e7f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index e8b948f3b0411..d02232feab2a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java index eeeea760df148..1f52cf33ba153 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java @@ -103,10 +103,7 @@ public void testLegacyAPI() throws IOException { thenReturn(mockEnforcer); FSPermissionChecker checker = new FSPermissionChecker( - fsOwner, superGroup, ugi, mockINodeAttributeProvider); - - // set operation type to null to force using the legacy API. - FSPermissionChecker.setOperationType(null); + fsOwner, superGroup, ugi, mockINodeAttributeProvider, false); when(iip.getPathSnapshotId()).thenReturn(snapshotId); when(iip.getINodesArray()).thenReturn(inodes); @@ -129,10 +126,10 @@ public void testCheckPermissionWithContextAPI() throws IOException { when(mockINodeAttributeProvider.getExternalAccessControlEnforcer(any())). thenReturn(mockEnforcer); + // force it to use the new, checkPermissionWithContext API. FSPermissionChecker checker = new FSPermissionChecker( - fsOwner, superGroup, ugi, mockINodeAttributeProvider); + fsOwner, superGroup, ugi, mockINodeAttributeProvider, true); - // force it to use the new, checkPermissionWithContext API. 
String operationName = "abc"; FSPermissionChecker.setOperationType(operationName); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 5ae9a3eb9770b..a6ab37e1252f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -56,10 +56,10 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestBackupNode { public static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java index 58baf730d755b..9a525481407ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java @@ -96,7 +96,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; +import java.util.function.Supplier; public class TestCacheDirectives { static final Logger LOG = LoggerFactory.getLogger(TestCacheDirectives.class); @@ -132,17 +132,23 @@ private static HdfsConfiguration createCachingConf() { conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2); conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2); - return conf; } + /** + * @return the configuration. + */ + Configuration getConf() { + return this.conf; + } + @Before public void setup() throws Exception { conf = createCachingConf(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); - dfs = cluster.getFileSystem(); + dfs = getDFS(); proto = cluster.getNameNodeRpc(); namenode = cluster.getNameNode(); prevCacheManipulator = NativeIO.POSIX.getCacheManipulator(); @@ -150,6 +156,13 @@ public void setup() throws Exception { BlockReaderTestUtil.enableHdfsCachingTracing(); } + /** + * @return the dfs instance. + */ + DistributedFileSystem getDFS() throws IOException { + return (DistributedFileSystem) FileSystem.get(conf); + } + @After public void teardown() throws Exception { // Remove cache directives left behind by tests so that we release mmaps. 
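The TestCacheDirectives hunks above replace the hard-wired cluster.getFileSystem() call with overridable getConf()/getDFS() hooks, which is what later allows a ViewFS-flavoured subclass to reuse the same test body. A small hedged sketch of that template-method shape (class and method names are illustrative, not the actual test classes):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    class CachingTestBaseSketch {
      protected Configuration conf = new Configuration();

      // Subclasses may decorate the configuration before the file system is built.
      Configuration getConf() {
        return conf;
      }

      // Subclasses may hand back a different FileSystem flavour.
      FileSystem getTargetFileSystem() throws IOException {
        return FileSystem.get(getConf());
      }
    }

    class ViewOverlaySketch extends CachingTestBaseSketch {
      @Override
      FileSystem getTargetFileSystem() throws IOException {
        getConf().set("fs.hdfs.impl",
            "org.apache.hadoop.hdfs.ViewDistributedFileSystem");
        return super.getTargetFileSystem();
      }
    }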
@@ -630,10 +643,12 @@ public void testCacheManagerRestart() throws Exception { String groupName = "partygroup"; FsPermission mode = new FsPermission((short)0777); long limit = 747; + long maxExpiry = 1234567890; dfs.addCachePool(new CachePoolInfo(pool) .setGroupName(groupName) .setMode(mode) - .setLimit(limit)); + .setLimit(limit) + .setMaxRelativeExpiryMs(maxExpiry)); RemoteIterator pit = dfs.listCachePools(); assertTrue("No cache pools found", pit.hasNext()); CachePoolInfo info = pit.next().getInfo(); @@ -641,6 +656,7 @@ public void testCacheManagerRestart() throws Exception { assertEquals(groupName, info.getGroupName()); assertEquals(mode, info.getMode()); assertEquals(limit, (long)info.getLimit()); + assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs()); assertFalse("Unexpected # of cache pools found", pit.hasNext()); // Create some cache entries @@ -701,6 +717,7 @@ public void testCacheManagerRestart() throws Exception { assertEquals(groupName, info.getGroupName()); assertEquals(mode, info.getMode()); assertEquals(limit, (long)info.getLimit()); + assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs()); assertFalse("Unexpected # of cache pools found", pit.hasNext()); dit = dfs.listCacheDirectives(null); @@ -1609,6 +1626,14 @@ public void testAddingCacheDirectiveInfosWhenCachingIsDisabled() "testAddingCacheDirectiveInfosWhenCachingIsDisabled:2"); } + /** + * @return the dfs instance for nnIdx. + */ + DistributedFileSystem getDFS(MiniDFSCluster cluster, int nnIdx) + throws IOException { + return cluster.getFileSystem(0); + } + @Test(timeout=120000) public void testExpiryTimeConsistency() throws Exception { conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1); @@ -1619,7 +1644,7 @@ public void testExpiryTimeConsistency() throws Exception { .build(); dfsCluster.transitionToActive(0); - DistributedFileSystem fs = dfsCluster.getFileSystem(0); + DistributedFileSystem fs = getDFS(dfsCluster, 0); final NameNode ann = dfsCluster.getNameNode(0); final Path filename = new Path("/file"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectivesWithViewDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectivesWithViewDFS.java new file mode 100644 index 0000000000000..b80c2a656c6e2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectivesWithViewDFS.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.ViewDistributedFileSystem; + +import java.io.IOException; +import java.net.URI; + +public class TestCacheDirectivesWithViewDFS extends TestCacheDirectives { + + @Override + public DistributedFileSystem getDFS() throws IOException { + Configuration conf = getConf(); + conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName()); + URI defaultFSURI = + URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY)); + ConfigUtil.addLinkFallback(conf, defaultFSURI.getHost(), + new Path(defaultFSURI.toString()).toUri()); + ConfigUtil.addLink(conf, defaultFSURI.getHost(), "/tmp", + new Path(defaultFSURI.toString()).toUri()); + return super.getDFS(); + } + + @Override + public DistributedFileSystem getDFS(MiniDFSCluster cluster, int nnIdx) + throws IOException { + Configuration conf = cluster.getConfiguration(nnIdx); + conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName()); + URI uri = cluster.getURI(0); + ConfigUtil.addLinkFallback(conf, uri.getHost(), uri); + ConfigUtil.addLink(conf, uri.getHost(), "/tmp", uri); + return cluster.getFileSystem(0); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 572ad8bef7a55..3e1198d3e24f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -94,12 +94,12 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Joiner; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; /** * This class tests the creation and validation of a checkpoint. 
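The new TestCacheDirectivesWithViewDFS above re-points fs.hdfs.impl at ViewDistributedFileSystem and registers a link fallback plus a /tmp link against the default namespace. A minimal hedged sketch of that mount-table wiring in isolation (the helper name is illustrative; the ConfigUtil calls mirror the ones used in the test):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.ConfigUtil;

    class ViewDfsMountSketch {
      static Configuration mountOver(Configuration conf, URI defaultFs) {
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.hdfs.ViewDistributedFileSystem");
        // Paths without an explicit link fall back to the real default namespace.
        ConfigUtil.addLinkFallback(conf, defaultFs.getHost(),
            new Path(defaultFs.toString()).toUri());
        // /tmp is also linked back to the same namespace, as in the test.
        ConfigUtil.addLink(conf, defaultFs.getHost(), "/tmp",
            new Path(defaultFs.toString()).toUri());
        return conf;
      }
    }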
@@ -2464,7 +2464,7 @@ public void testLegacyOivImage() throws Exception { } @Test(timeout = 300000) - public void testActiveRejectSmallerDeltaImage() throws Exception { + public void testActiveRejectSmallerTxidDeltaImage() throws Exception { MiniDFSCluster cluster = null; Configuration conf = new HdfsConfiguration(); // Set the delta txid threshold to 10 @@ -2517,6 +2517,57 @@ public void testActiveRejectSmallerDeltaImage() throws Exception { } } + /** + * Test that even with txid and time delta threshold, by having time + * relaxation, SBN can still upload images to ANN. + * + * @throws Exception + */ + @Test + public void testActiveImageWithTimeDeltaRelaxation() throws Exception { + Configuration conf = new HdfsConfiguration(); + // Set the delta txid threshold to some arbitrarily large value, so + // it does not trigger a checkpoint during this test. + conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000000); + // Set the delta time threshold to some arbitrarily large value, so + // it does not trigger a checkpoint during this test. + conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 900000); + // Set relaxation to 0, means time delta = 0 from previous image is fine, + // this will effectively disable reject small delta image + ImageServlet.setRecentImageCheckTimePrecision(0); + + SecondaryNameNode secondary = null; + + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0).format(true).build()) { + // enable small delta rejection + NameNode active = cluster.getNameNode(); + active.httpServer.getHttpServer() + .setAttribute(RECENT_IMAGE_CHECK_ENABLED, true); + + secondary = startSecondaryNameNode(conf); + + FileSystem fs = cluster.getFileSystem(); + assertEquals(0, active.getNamesystem().getFSImage() + .getMostRecentCheckpointTxId()); + + // create 5 dir. 
+ for (int i = 0; i < 5; i++) { + fs.mkdirs(new Path("dir-" + i)); + } + + // Checkpoint 1st + secondary.doCheckpoint(); + // at this point, despite this is a small delta change, w.r.t both + // txid and time delta, due to we set relaxation to 0, this image + // still gets accepted + assertEquals(9, active.getNamesystem().getFSImage() + .getMostRecentCheckpointTxId()); + } finally { + cleanup(secondary); + } + } + private static void cleanup(SecondaryNameNode snn) { if (snn != null) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index 46b8db1cd9bc4..16f4de96877cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java index 923750cfe8ce8..cdb57f240fa54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java @@ -21,13 +21,11 @@ import java.io.IOException; import java.util.AbstractMap; import java.util.ArrayList; -import java.util.Comparator; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.Semaphore; import org.apache.hadoop.fs.Options; @@ -54,7 +52,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils; -import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.Node; @@ -67,7 +64,7 @@ import org.junit.rules.Timeout; import org.mockito.Mockito; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY; import static org.junit.Assert.assertNotEquals; @@ -385,6 +382,10 @@ public void testDeleteAndLeaseRecoveryHardLimitSnapshot() throws Exception { // Disable permissions so that another user can recover the lease. 
config.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); config.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + long leaseRecheck = 1000; + conf.setLong(DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY, leaseRecheck); + conf.setLong(DFS_LEASE_HARDLIMIT_KEY, leaseRecheck/1000); + FSDataOutputStream stm = null; try { cluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build(); @@ -409,30 +410,8 @@ public void testDeleteAndLeaseRecoveryHardLimitSnapshot() throws Exception { // the streamer. AppendTestUtil.write(stm, 0, BLOCK_SIZE); - // Mock a scenario that the lease reached hard limit. - final LeaseManager lm = (LeaseManager) Whitebox - .getInternalState(cluster.getNameNode().getNamesystem(), - "leaseManager"); - final TreeSet leases = - (TreeSet) Whitebox.getInternalState(lm, "sortedLeases"); - final TreeSet spyLeases = new TreeSet<>(new Comparator() { - @Override - public int compare(Lease o1, Lease o2) { - return Long.signum(o1.getLastUpdate() - o2.getLastUpdate()); - } - }); - while (!leases.isEmpty()) { - final Lease lease = leases.first(); - final Lease spyLease = Mockito.spy(lease); - Mockito.doReturn(true).when(spyLease).expiredHardLimit(); - spyLeases.add(spyLease); - leases.remove(lease); - } - Whitebox.setInternalState(lm, "sortedLeases", spyLeases); - // wait for lease manager's background 'Monitor' class to check leases. - Thread.sleep(2 * conf.getLong(DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY, - DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT)); + Thread.sleep(2 * leaseRecheck); LOG.info("Now check we can restart"); cluster.restartNameNodes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 4cdb05f792921..0592b97b78c6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -103,8 +103,8 @@ import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * This class tests the creation and validation of a checkpoint. 
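The TestDeleteRace change above drops the reflective lease spying and instead shortens the lease hard limit through configuration, then sleeps past it so recovery is triggered by the normal lease monitor. A hedged sketch of that configuration pattern on its own (the one-second values are illustrative; both keys appear in the hunk above, and the hard-limit key is expressed in seconds):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class ShortLeaseConfSketch {
      static Configuration withShortLeases() {
        Configuration conf = new Configuration();
        long leaseRecheckMs = 1000;
        // Re-check leases every second...
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY,
            leaseRecheckMs);
        // ...and expire the hard limit after one second, so sleeping for
        // 2 * leaseRecheckMs gives the lease monitor time to recover the file.
        conf.setLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY, leaseRecheckMs / 1000);
        return conf;
      }
    }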
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java index eb4f042f866e6..c6aad174382b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java @@ -46,7 +46,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import com.google.common.base.Supplier; +import java.util.function.Supplier; @RunWith(Parameterized.class) public class TestEditLogAutoroll { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index a083140dc0fe2..f844eb3e1c54c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java index 45a785be75e0a..3d391b0468761 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java @@ -20,8 +20,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager; import org.junit.Test; import org.mockito.Mockito; @@ -57,6 +59,54 @@ private boolean unprotectedSetTimes(long atime, long atime0, long precision, return FSDirAttrOp.unprotectedSetTimes(fsd, iip, mtime, atime, force); } + private boolean unprotectedSetAttributes(short currPerm, short newPerm) + throws Exception { + return unprotectedSetAttributes(currPerm, newPerm, "user1", "user1", + false); + } + + private boolean unprotectedSetAttributes(short currPerm, short newPerm, + String currUser, String newUser, boolean testChangeOwner) + throws Exception { + String groupName = "testGroup"; + FsPermission originalPerm = new FsPermission(currPerm); + FsPermission updatedPerm = new FsPermission(newPerm); + FSNamesystem fsn = Mockito.mock(FSNamesystem.class); + SnapshotManager ssMgr = Mockito.mock(SnapshotManager.class); + FSDirectory fsd = Mockito.mock(FSDirectory.class); + INodesInPath iip = Mockito.mock(INodesInPath.class); + when(fsd.getFSNamesystem()).thenReturn(fsn); + when(fsn.getSnapshotManager()).thenReturn(ssMgr); + 
when(ssMgr.getSkipCaptureAccessTimeOnlyChange()).thenReturn(false); + when(fsd.getAccessTimePrecision()).thenReturn(1000L); + when(fsd.hasWriteLock()).thenReturn(Boolean.TRUE); + when(iip.getLatestSnapshotId()).thenReturn(0); + INode inode = new INodeDirectory(1000, DFSUtil.string2Bytes(""), + new PermissionStatus(currUser, "testGroup", originalPerm), 0L); + when(iip.getLastINode()).thenReturn(inode); + return testChangeOwner ? FSDirAttrOp.unprotectedSetOwner(fsd, iip, newUser, + groupName) : FSDirAttrOp.unprotectedSetPermission(fsd, iip, + updatedPerm); + } + + @Test + public void testUnprotectedSetPermissions() throws Exception { + assertTrue("setPermissions return true for updated permissions", + unprotectedSetAttributes((short) 0777, (short) 0)); + assertFalse("setPermissions should return false for same permissions", + unprotectedSetAttributes((short) 0777, (short) 0777)); + } + + @Test + public void testUnprotectedSetOwner() throws Exception { + assertTrue("SetOwner should return true for a new user", + unprotectedSetAttributes((short) 0777, (short) 0777, "user1", + "user2", true)); + assertFalse("SetOwner should return false for same user", + unprotectedSetAttributes((short) 0777, (short) 0777, "user1", + "user1", true)); + } + @Test public void testUnprotectedSetTimes() throws Exception { // atime < access time + precision diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java index d143013fdefde..c159d4d61c09b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Random; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -47,7 +47,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 57c0453f1f850..8008be79d9113 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -73,8 +73,8 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import com.google.common.collect.Maps; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; @RunWith(Parameterized.class) public class TestFSEditLogLoader { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index 
793a749be21c4..f4cd33bc9ffa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -35,7 +35,8 @@ import java.util.ArrayList; import java.util.EnumSet; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.Block; @@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.NativeCodeLoader; @@ -1152,4 +1154,63 @@ private void ensureSubSectionsAlignWithParent(ArrayList
    subSec, // The first sub-section and parent section should have the same offset assertEquals(parent.getOffset(), subSec.get(0).getOffset()); } + + @Test + public void testUpdateBlocksMapAndNameCacheAsync() throws IOException { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + cluster.waitActive(); + DistributedFileSystem fs = cluster.getFileSystem(); + FSDirectory fsdir = cluster.getNameNode().namesystem.getFSDirectory(); + File workingDir = GenericTestUtils.getTestDir(); + + File preRestartTree = new File(workingDir, "preRestartTree"); + File postRestartTree = new File(workingDir, "postRestartTree"); + + Path baseDir = new Path("/user/foo"); + fs.mkdirs(baseDir); + fs.allowSnapshot(baseDir); + for (int i = 0; i < 5; i++) { + Path dir = new Path(baseDir, Integer.toString(i)); + fs.mkdirs(dir); + for (int j = 0; j < 5; j++) { + Path file = new Path(dir, Integer.toString(j)); + FSDataOutputStream os = fs.create(file); + os.write((byte) j); + os.close(); + } + fs.createSnapshot(baseDir, "snap_"+i); + fs.rename(new Path(dir, "0"), new Path(dir, "renamed")); + } + SnapshotTestHelper.dumpTree2File(fsdir, preRestartTree); + + // checkpoint + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + + cluster.restartNameNode(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + fsdir = cluster.getNameNode().namesystem.getFSDirectory(); + + // Ensure all the files created above exist, and blocks is correct. + for (int i = 0; i < 5; i++) { + Path dir = new Path(baseDir, Integer.toString(i)); + assertTrue(fs.getFileStatus(dir).isDirectory()); + for (int j = 0; j < 5; j++) { + Path file = new Path(dir, Integer.toString(j)); + if (j == 0) { + file = new Path(dir, "renamed"); + } + FSDataInputStream in = fs.open(file); + int n = in.readByte(); + assertEquals(j, n); + in.close(); + } + } + SnapshotTestHelper.dumpTree2File(fsdir, postRestartTree); + SnapshotTestHelper.compareDumpedTreeInFile( + preRestartTree, postRestartTree, true); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java index d9c24d9c9f9d5..81251ee81cb7e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java @@ -39,7 +39,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestFSImageWithAcl { private static Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java index 0769a7f96e297..f27c8f28bd448 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java @@ -17,17 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; -import static 
org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.ArrayList; -import java.util.EnumSet; -import java.util.List; -import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -48,12 +37,22 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.test.GenericTestUtils; -import org.slf4j.event.Level; - import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.event.Level; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Random; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Test FSImage save/load when Snapshot is supported @@ -610,4 +609,44 @@ String printTree(String label) throws Exception { output.println(b); return b; } + + @Test (timeout=60000) + public void testFSImageWithDoubleRename() throws Exception { + final Path dir1 = new Path("/dir1"); + final Path dir2 = new Path("/dir2"); + hdfs.mkdirs(dir1); + hdfs.mkdirs(dir2); + Path dira = new Path(dir1, "dira"); + Path dirx = new Path(dir1, "dirx"); + Path dirb = new Path(dira, "dirb"); + hdfs.mkdirs(dira); + hdfs.mkdirs(dirb); + hdfs.mkdirs(dirx); + hdfs.allowSnapshot(dir1); + hdfs.createSnapshot(dir1, "s0"); + Path file1 = new Path(dirb, "file1"); + DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, (short) 1, seed); + Path rennamePath = new Path(dirx, "dirb"); + // mv /dir1/dira/dirb to /dir1/dirx/dirb + hdfs.rename(dirb, rennamePath); + hdfs.createSnapshot(dir1, "s1"); + DFSTestUtil.appendFile(hdfs, new Path("/dir1/dirx/dirb/file1"), + "more data"); + Path renamePath1 = new Path(dir2, "dira"); + hdfs.mkdirs(renamePath1); + //mv dirx/dirb to /dir2/dira/dirb + hdfs.rename(rennamePath, renamePath1); + hdfs.delete(renamePath1, true); + hdfs.deleteSnapshot(dir1, "s1"); + // save namespace and restart cluster + hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + hdfs.saveNamespace(); + hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + cluster.shutdown(); + cluster = new MiniDFSCluster.Builder(conf).format(false) + .numDataNodes(NUM_DATANODES).build(); + cluster.waitActive(); + fsn = cluster.getNamesystem(); + hdfs = cluster.getFileSystem(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index 549b98afa3589..f0ae181016703 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.metrics2.MetricsRecordBuilder; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index f258caea0d545..0155d876169d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -49,8 +49,8 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; public class TestFileJournalManager { static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index ed0f0742866a6..0964b3ede43fd 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -33,6 +33,9 @@ import java.io.IOException; import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.test.LambdaTestUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -218,6 +221,70 @@ public void testSnapshotTruncateThenDeleteSnapshot() throws IOException { fs.delete(dir, true); } + /** + * Test truncate twice together on a file. + */ + @Test(timeout=90000) + public void testTruncateTwiceTogether() throws Exception { + + Path dir = new Path("/testTruncateTwiceTogether"); + fs.mkdirs(dir); + final Path p = new Path(dir, "file"); + final byte[] data = new byte[100 * BLOCK_SIZE]; + ThreadLocalRandom.current().nextBytes(data); + writeContents(data, data.length, p); + + DataNodeFaultInjector originInjector = DataNodeFaultInjector.get(); + DataNodeFaultInjector injector = new DataNodeFaultInjector() { + @Override + public void delay() { + try { + // Bigger than soft lease period. + Thread.sleep(5000); + } catch (InterruptedException e) { + // Ignore + } + } + }; + // Delay to recovery. + DataNodeFaultInjector.set(injector); + + // Truncate by using different client name. 
+ Thread t = new Thread(() -> { + String hdfsCacheDisableKey = "fs.hdfs.impl.disable.cache"; + boolean originCacheDisable = + conf.getBoolean(hdfsCacheDisableKey, false); + try { + conf.setBoolean(hdfsCacheDisableKey, true); + FileSystem fs1 = FileSystem.get(conf); + fs1.truncate(p, data.length-1); + } catch (IOException e) { + // ignore + } finally{ + conf.setBoolean(hdfsCacheDisableKey, originCacheDisable); + } + }); + t.start(); + t.join(); + NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) + .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT); + + LambdaTestUtils.intercept(RemoteException.class, + "/testTruncateTwiceTogether/file is being truncated", + () -> fs.truncate(p, data.length - 2)); + + // wait for block recovery + checkBlockRecovery(p); + assertFileLength(p, data.length - 1); + + DataNodeFaultInjector.set(originInjector); + NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) + .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, + conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY, + DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000); + fs.delete(dir, true); + } + /** * Truncate files and then run other operations such as * rename, set replication, set permission, etc. @@ -631,7 +698,7 @@ public void testTruncateFailure() throws IOException { { try { fs.truncate(p, 0); - fail("Truncate must fail since a trancate is already in pregress."); + fail("Truncate must fail since a truncate is already in progress."); } catch (IOException expected) { GenericTestUtils.assertExceptionContains( "Failed to TRUNCATE_FILE", expected); @@ -1319,4 +1386,38 @@ public void testQuotaOnTruncateWithSnapshot() throws Exception { assertEquals(fs.getContentSummary(root).getSpaceConsumed(), fs.getQuotaUsage(root).getSpaceConsumed()); } + + /** + * Test concat on file which is a reference. 
+ */ + @Test + public void testConcatOnInodeRefernce() throws IOException { + String dir = "/testConcat"; + Path trgDir = new Path(dir); + fs.mkdirs(new Path(dir), FsPermission.getDirDefault()); + + // Create a target file + Path trg = new Path(dir, "file"); + DFSTestUtil.createFile(fs, trg, 512, (short) 2, 0); + + String dir2 = "/dir2"; + Path srcDir = new Path(dir2); + // create a source file + fs.mkdirs(srcDir); + fs.allowSnapshot(srcDir); + Path src = new Path(srcDir, "file1"); + DFSTestUtil.createFile(fs, src, 512, (short) 2, 0); + + // make the file as an Inode reference and delete the reference + fs.createSnapshot(srcDir, "s1"); + fs.rename(src, trgDir); + fs.deleteSnapshot(srcDir, "s1"); + Path[] srcs = new Path[1]; + srcs[0] = new Path(dir, "file1"); + assertEquals(2, fs.getContentSummary(new Path(dir)).getFileCount()); + + // perform concat + fs.concat(trg, srcs); + assertEquals(1, fs.getContentSummary(new Path(dir)).getFileCount()); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index b035848319a36..d89bb6b4332c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -59,7 +59,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -129,7 +129,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * A JUnit test for doing fsck. @@ -254,6 +254,7 @@ private void setupAuditLogs() throws IOException { file.delete(); } Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + logger.removeAllAppenders(); logger.setLevel(Level.INFO); PatternLayout layout = new PatternLayout("%m%n"); RollingFileAppender appender = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java index 4839783c95278..1608a84168d68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java @@ -45,6 +45,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -525,4 +526,42 @@ public void testConcatReservedRelativePaths() throws IOException { GenericTestUtils.assertExceptionContains(errMsg, e); } } + + /** + * Test concat on same source and target file which is a inode reference. 
+ */ + @Test + public void testConcatOnSameFile() throws Exception { + String dir = "/dir1"; + Path trgDir = new Path(dir); + dfs.mkdirs(new Path(dir)); + + // create a source file + String dir2 = "/dir2"; + Path srcDir = new Path(dir2); + dfs.mkdirs(srcDir); + dfs.allowSnapshot(srcDir); + Path src = new Path(srcDir, "file1"); + DFSTestUtil.createFile(dfs, src, 512, (short) 2, 0); + + // make the file as an Inode reference and delete the reference + dfs.createSnapshot(srcDir, "s1"); + dfs.rename(src, trgDir); + dfs.deleteSnapshot(srcDir, "s1"); + Path[] srcs = new Path[1]; + srcs[0] = new Path(dir, "file1"); + + // perform concat + LambdaTestUtils.intercept(RemoteException.class, + "concat: the src file /dir1/file1 is the same with the target" + + " file /dir1/file1", + () -> dfs.concat(srcs[0], srcs)); + + // the file should exists and read the file + byte[] buff = new byte[1080]; + FSDataInputStream stream = dfs.open(srcs[0]); + stream.readFully(0, buff, 0, 512); + + assertEquals(1, dfs.getContentSummary(new Path(dir)).getFileCount()); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java index 433be79b87a28..f4c5763336b0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java @@ -24,7 +24,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; @@ -44,7 +45,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestINodeAttributeProvider { private static final Logger LOG = @@ -80,6 +81,7 @@ public void checkPermission(String fsOwner, String supergroup, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access); + CALLED.add("checkPermission|" + path); } @Override @@ -93,6 +95,7 @@ public void checkPermissionWithContext( CALLED.add("checkPermission|" + authzContext.getAncestorAccess() + "|" + authzContext.getParentAccess() + "|" + authzContext .getAccess()); + CALLED.add("checkPermission|" + authzContext.getPath()); } } @@ -109,7 +112,12 @@ public void stop() { @Override public INodeAttributes getAttributes(String[] pathElements, final INodeAttributes inode) { + String fullPath = String.join("/", pathElements); + if (!fullPath.startsWith("/")) { + fullPath = "/" + fullPath; + } CALLED.add("getAttributes"); + CALLED.add("getAttributes|"+fullPath); final boolean useDefault = useDefault(pathElements); final boolean useNullAcl = useNullAclFeature(pathElements); return new INodeAttributes() { @@ -485,4 +493,111 @@ public Void run() throws 
Exception { } }); } + + @Test + // HDFS-15372 - Attribute provider should not see the snapshot path as it + // should be resolved into the original path name before it hits the provider. + public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + DistributedFileSystem hdfs = miniDFS.getFileSystem(); + final Path userPath = new Path("/user"); + final Path authz = new Path("/user/authz"); + final Path authzChild = new Path("/user/authz/child2"); + + fs.mkdirs(userPath); + fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION)); + fs.mkdirs(authz); + hdfs.allowSnapshot(userPath); + fs.setPermission(authz, new FsPermission(HDFS_PERMISSION)); + fs.mkdirs(authzChild); + fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION)); + fs.createSnapshot(userPath, "snapshot_1"); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1", + new String[]{"g1"}); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + final Path snapChild = + new Path("/user/.snapshot/snapshot_1/authz/child2"); + // Run various methods on the path to access the attributes etc. + fs.getAclStatus(snapChild); + fs.getContentSummary(snapChild); + fs.getFileStatus(snapChild); + Assert.assertFalse(CALLED.contains("getAttributes|" + + snapChild.toString())); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2")); + // The snapshot path should be seen by the permission checker, but when + // it checks access, the paths will be resolved so the attributeProvider + // only sees the resolved path. + Assert.assertTrue( + CALLED.contains("checkPermission|" + snapChild.toString())); + CALLED.clear(); + fs.getAclStatus(new Path("/")); + Assert.assertTrue(CALLED.contains("checkPermission|/")); + Assert.assertTrue(CALLED.contains("getAttributes|/")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user")); + Assert.assertTrue(CALLED.contains("checkPermission|/user")); + Assert.assertTrue(CALLED.contains("getAttributes|/user")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); + // attribute provider never sees the .snapshot path directly. + Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED + .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/authz")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + return null; + } + }); + // Delete the files / folders covered by the snapshot, then re-check they + // are all readable correctly. 
+ fs.delete(authz, true); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0)); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot")); + Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot")); + // attribute provider never sees the .snapshot path directly. + Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("checkPermission|/user/.snapshot/snapshot_1")); + Assert.assertTrue( + CALLED.contains("getAttributes|/user/.snapshot/snapshot_1")); + + CALLED.clear(); + fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED + .contains("checkPermission|/user/.snapshot/snapshot_1/authz")); + Assert.assertTrue(CALLED.contains("getAttributes|/user/authz")); + + return null; + } + }); + + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 0553678cd8689..b32f8fe759d1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -77,7 +77,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; public class TestINodeFile { // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java index ccd908b645962..61eeb362a183c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java @@ -22,7 +22,7 @@ import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index 2158bc7a44c8a..c6603cfee2ce0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -295,4 +296,29 @@ public void testListOpenFilesWithFilterPath() throws IOException { verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT); } } + + 
@Test + public void testListOpenFilesWithInvalidPathServerSide() throws Exception { + HashMap openFiles = new HashMap<>(); + openFiles.putAll( + DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1)); + verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), + "/base"); + intercept(AssertionError.class, "Absolute path required", + "Expect InvalidPathException", () -> verifyOpenFiles(new HashMap<>(), + EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "hdfs://cluster/base")); + while(openFiles.size() > 0) { + DFSTestUtil.closeOpenFiles(openFiles, 1); + verifyOpenFiles(openFiles); + } + } + + @Test + public void testListOpenFilesWithInvalidPathClientSide() throws Exception { + intercept(IllegalArgumentException.class, "Wrong FS", + "Expect IllegalArgumentException", () -> fs + .listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), + "hdfs://non-cluster/")); + fs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/path"); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index d4748f3d60140..0c9b499ddeba7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -30,7 +30,7 @@ import java.util.ArrayList; import java.util.concurrent.TimeoutException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java index baf999d6b7a39..18099420b318d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.junit.Test; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index 7db699ef30e11..f00bbca4cbf01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -50,10 +50,10 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import 
org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestNNStorageRetentionManager { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index 7a2fc9abef10c..7071c661f3354 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -41,8 +41,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; /** * This class tests various combinations of dfs.namenode.name.dir diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 7157cff0e7a2a..a309e324f5485 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Supplier; -import com.google.common.util.concurrent.Uninterruptibles; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -435,6 +435,103 @@ public Boolean get() { } } + @Test(timeout = 120000) + public void testInServiceNodes() throws Exception { + Configuration conf = new Configuration(); + conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, + 30); + conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, + CombinedHostFileManager.class, HostConfigManager.class); + MiniDFSCluster cluster = null; + HostsFileWriter hostsFileWriter = new HostsFileWriter(); + hostsFileWriter.initialize(conf, "temp/TestInServiceNodes"); + + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster.waitActive(); + + final FSNamesystem fsn = cluster.getNameNode().namesystem; + final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + final ObjectName mxbeanName = new ObjectName( + "Hadoop:service=NameNode,name=FSNamesystem"); + + List hosts = new ArrayList<>(); + for (DataNode dn : cluster.getDataNodes()) { + hosts.add(dn.getDisplayName()); + } + hostsFileWriter.initIncludeHosts(hosts.toArray( + new String[hosts.size()])); + fsn.getBlockManager().getDatanodeManager().refreshNodes(conf); + + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + try { + int numLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + "NumLiveDataNodes"); + return numLiveDataNodes == 3; + } catch (Exception e) { + return false; + } + } + }, 1000, 60000); + + // Verify nodes + int numDecomLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + 
"NumDecomLiveDataNodes"); + int numInMaintenanceLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + "NumInMaintenanceLiveDataNodes"); + int numInServiceLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + "NumInServiceLiveDataNodes"); + assertEquals(0, numDecomLiveDataNodes); + assertEquals(0, numInMaintenanceLiveDataNodes); + assertEquals(3, numInServiceLiveDataNodes); + + // Add 2 nodes to out-of-service list + ArrayList decomNodes = new ArrayList<>(); + decomNodes.add(cluster.getDataNodes().get(0).getDisplayName()); + + Map maintenanceNodes = new HashMap<>(); + final int expirationInMs = 30 * 1000; + maintenanceNodes.put(cluster.getDataNodes().get(1).getDisplayName(), + Time.now() + expirationInMs); + + hostsFileWriter.initOutOfServiceHosts(decomNodes, maintenanceNodes); + fsn.getBlockManager().getDatanodeManager().refreshNodes(conf); + + // Wait for the DatanodeAdminManager to complete check + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + try { + int numLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + "NumLiveDataNodes"); + int numDecomLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + "NumDecomLiveDataNodes"); + int numInMaintenanceLiveDataNodes = (int) mbs.getAttribute( + mxbeanName, "NumInMaintenanceLiveDataNodes"); + return numLiveDataNodes == 3 && + numDecomLiveDataNodes == 1 && + numInMaintenanceLiveDataNodes == 1; + } catch (Exception e) { + return false; + } + } + }, 1000, 60000); + + // Verify nodes + numInServiceLiveDataNodes = (int) mbs.getAttribute(mxbeanName, + "NumInServiceLiveDataNodes"); + assertEquals(1, numInServiceLiveDataNodes); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + hostsFileWriter.cleanup(); + } + } + @Test (timeout = 120000) public void testMaintenanceNodes() throws Exception { LOG.info("Starting testMaintenanceNodes"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java index 2677781babcad..3e80091307c0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.test.GenericTestUtils; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.junit.After; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 25642faffbf29..9b5e9884c525a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.commons.logging.Log; import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java index 3265bed80c1ce..ada7c82150a12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java @@ -24,6 +24,7 @@ import org.junit.Before; import org.junit.After; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; import static org.junit.Assert.*; import org.slf4j.Logger; @@ -378,6 +379,21 @@ public void testBlockInvalidateLimitAfterReconfigured() datanodeManager.getBlockInvalidateLimit()); } + @Test + public void testEnableParallelLoadAfterReconfigured() + throws ReconfigurationException { + final NameNode nameNode = cluster.getNameNode(); + + // By default, enableParallelLoad is false + assertEquals(false, FSImageFormatProtobuf.getEnableParallelLoad()); + + nameNode.reconfigureProperty(DFS_IMAGE_PARALLEL_LOAD_KEY, + Boolean.toString(true)); + + // After reconfigured, enableParallelLoad is true + assertEquals(true, FSImageFormatProtobuf.getEnableParallelLoad()); + } + @After public void shutDown() throws IOException { if (cluster != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index 26701a59d8a9b..9a9f5aaa7a3f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -57,7 +57,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * This tests data recovery mode for the NameNode. 
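The testInServiceNodes hunk above relies on a pattern this patch uses repeatedly: polling an FSNamesystem metric over JMX with GenericTestUtils.waitFor instead of sleeping for a fixed interval. The standalone sketch below only illustrates that pattern and is not part of the patch; it assumes a NameNode is already running in the same JVM (for example via MiniDFSCluster), and the bean and attribute names are the ones quoted in the hunk.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.apache.hadoop.test.GenericTestUtils;

/** Illustrative helper: wait until the NameNode reports the expected live DataNode count. */
public class FsNamesystemJmxPoller {
  public static void waitForLiveDataNodes(final int expected) throws Exception {
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    final ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
    // Poll every second, give up after 60 seconds, mirroring the test above.
    GenericTestUtils.waitFor(() -> {
      try {
        int live = (int) mbs.getAttribute(mxbeanName, "NumLiveDataNodes");
        return live == expected;
      } catch (Exception e) {
        // The MXBean may not be registered yet while the NameNode starts up.
        return false;
      }
    }, 1000, 60000);
  }
}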
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java index 1b0a671ff0c13..a2896cec9fd09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java index 4343b0acd038f..c3abc12bf91c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @@ -226,9 +226,9 @@ public void testXceiverCountInternal(int minMaintenanceR) throws Exception { triggerHeartbeats(datanodes); // check that all nodes are live and in service - int expectedTotalLoad = nodes; // xceiver server adds 1 to load + int expectedTotalLoad = 0; int expectedInServiceNodes = nodes; - int expectedInServiceLoad = nodes; + int expectedInServiceLoad = 0; checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad); @@ -333,10 +333,7 @@ public void testXceiverCountInternal(int minMaintenanceR) throws Exception { expectedInServiceNodes--; } assertEquals(expectedInServiceNodes, getNumDNInService(namesystem)); - // live nodes always report load of 1. no nodes is load 0 - double expectedXceiverAvg = (i == nodes-1) ? 
0.0 : 1.0; - assertEquals((double)expectedXceiverAvg, - getInServiceXceiverAverage(namesystem), EPSILON); + assertEquals(0, getInServiceXceiverAverage(namesystem), EPSILON); } // final sanity check checkClusterHealth(0, namesystem, 0.0, 0, 0.0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java index 1ac9257a572f3..5a6d12a27ebd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java @@ -37,7 +37,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.IOException; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java index 67d2ffbe06c7d..ea68ee705bafb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java @@ -18,14 +18,16 @@ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.base.Joiner; -import com.google.common.collect.Iterables; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; @@ -36,9 +38,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.*; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -193,6 +197,25 @@ private Collection createTestMatrix() { return matrix; } + private Collection createTestMatrixForProtectSubDirs() { + Collection matrix = new ArrayList(); + + // Nested unprotected dirs. + matrix.add(TestMatrixEntry.get() + .addUnprotectedDir("/1", true) + .addUnprotectedDir("/1/2", true) + .addUnprotectedDir("/1/2/3", true) + .addUnprotectedDir("/1/2/3/4", true)); + + // Non-empty protected dir. 
+ matrix.add(TestMatrixEntry.get() + .addProtectedDir("/1", false) + .addUnprotectedDir("/1/2", false) + .addUnprotectedDir("/1/2/3", false) + .addUnprotectedDir("/1/2/3/4", true)); + return matrix; + } + @Test public void testReconfigureProtectedPaths() throws Throwable { Configuration conf = new HdfsConfiguration(); @@ -264,6 +287,31 @@ public void testDelete() throws Throwable { } } + @Test + public void testMoveToTrash() throws Throwable { + for (TestMatrixEntry testMatrixEntry : createTestMatrix()) { + Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600); + MiniDFSCluster cluster = setupTestCase( + conf, testMatrixEntry.getProtectedPaths(), + testMatrixEntry.getUnprotectedPaths()); + + try { + LOG.info("Running {}", testMatrixEntry); + FileSystem fs = cluster.getFileSystem(); + for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) { + assertThat( + testMatrixEntry + ": Testing whether " + path + + " can be moved to trash", + moveToTrash(fs, path, conf), + is(testMatrixEntry.canPathBeDeleted(path))); + } + } finally { + cluster.shutdown(); + } + } + } + /* * Verify that protected directories could not be renamed. */ @@ -292,6 +340,94 @@ public void testRename() throws Throwable { } } + @Test + public void testRenameProtectSubDirs() throws Throwable { + for (TestMatrixEntry testMatrixEntry : + createTestMatrixForProtectSubDirs()) { + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true); + MiniDFSCluster cluster = setupTestCase( + conf, testMatrixEntry.getProtectedPaths(), + testMatrixEntry.getUnprotectedPaths()); + + try { + LOG.info("Running {}", testMatrixEntry); + FileSystem fs = cluster.getFileSystem(); + for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) { + assertThat( + testMatrixEntry + ": Testing whether " + + srcPath + " can be renamed", + renamePath(fs, srcPath, + new Path(srcPath.toString() + "_renamed")), + is(testMatrixEntry.canPathBeRenamed(srcPath))); + } + } finally { + cluster.shutdown(); + } + } + } + + @Test + public void testMoveProtectedSubDirsToTrash() throws Throwable { + for (TestMatrixEntry testMatrixEntry : + createTestMatrixForProtectSubDirs()) { + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true); + conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600); + MiniDFSCluster cluster = setupTestCase( + conf, testMatrixEntry.getProtectedPaths(), + testMatrixEntry.getUnprotectedPaths()); + + try { + LOG.info("Running {}", testMatrixEntry); + FileSystem fs = cluster.getFileSystem(); + for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) { + assertThat( + testMatrixEntry + ": Testing whether " + + srcPath + " can be moved to trash", + moveToTrash(fs, srcPath, conf), + is(testMatrixEntry.canPathBeRenamed(srcPath))); + } + } finally { + cluster.shutdown(); + } + } + } + + @Test + public void testDeleteProtectSubDirs() throws Throwable { + for (TestMatrixEntry testMatrixEntry : + createTestMatrixForProtectSubDirs()) { + Configuration conf = new HdfsConfiguration(); + conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true); + MiniDFSCluster cluster = setupTestCase( + conf, testMatrixEntry.getProtectedPaths(), + testMatrixEntry.getUnprotectedPaths()); + + try { + LOG.info("Running {}", testMatrixEntry); + FileSystem fs = cluster.getFileSystem(); + for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) { + final long countBefore = cluster.getNamesystem().getFilesTotal(); + 
assertThat( + testMatrixEntry + ": Testing whether " + + path + " can be deleted", + deletePath(fs, path), + is(testMatrixEntry.canPathBeDeleted(path))); + final long countAfter = cluster.getNamesystem().getFilesTotal(); + + if (!testMatrixEntry.canPathBeDeleted(path)) { + assertThat( + "Either all paths should be deleted or none", + countAfter, is(countBefore)); + } + } + } finally { + cluster.shutdown(); + } + } + } + /** * Verify that configured paths are normalized by removing * redundant separators. @@ -384,6 +520,21 @@ private boolean deletePath(FileSystem fs, Path path) throws IOException { } } + private boolean moveToTrash(FileSystem fs, Path path, Configuration conf) { + try { + return Trash.moveToAppropriateTrash(fs, path, conf); + } catch (FileNotFoundException fnf) { + // fs.delete(...) does not throw an exception if the file does not exist. + // The deletePath method in this class, will therefore return true if + // there is an attempt to delete a file which does not exist. Therefore + // catching this exception and returning true to keep it consistent and + // allow tests to work with the same test matrix. + return true; + } catch (IOException ace) { + return false; + } + } + /** * Return true if the path was successfully renamed. False if it * failed with AccessControlException. Any other exceptions are diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java index 5d34d3cc75e29..7f4be8afc93a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java @@ -31,7 +31,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java index 7f5110739ad67..345a21c8d59ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java index bff549a1e1bff..245602ee9bfa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java @@ -32,7 +32,7 @@ import 
javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 4995208a0e1e6..19f1ca975f4b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -52,7 +52,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; /** * Startup and checkpoint tests * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java index df02b35f7229e..1ec08e49153ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java @@ -47,7 +47,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; public class TestTransferFsImage { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java index 3383c4ee47b0e..0421941f3f4c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java @@ -17,16 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertTrue; - import java.io.IOException; import java.util.HashSet; import java.util.Set; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -41,16 +38,14 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager; import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager; -import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.HostsFileWriter; import org.apache.hadoop.net.StaticMapping; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; - /** * 
End-to-end test case for upgrade domain * The test configs upgrade domain for nodes via admin json @@ -63,6 +58,8 @@ public class TestUpgradeDomainBlockPlacementPolicy { private static final short REPLICATION_FACTOR = (short) 3; private static final int DEFAULT_BLOCK_SIZE = 1024; + private static final int WAIT_TIMEOUT_MS = 60000; + private static final long FILE_SIZE = DEFAULT_BLOCK_SIZE * 5; static final String[] racks = { "/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2" }; static final String[] hosts = @@ -71,9 +68,6 @@ public class TestUpgradeDomainBlockPlacementPolicy { {"ud5", "ud2", "ud3", "ud1", "ud2", "ud4"}; static final Set expectedDatanodeIDs = new HashSet<>(); private MiniDFSCluster cluster = null; - private NamenodeProtocols nameNodeRpc = null; - private FSNamesystem namesystem = null; - private PermissionStatus perm = null; private HostsFileWriter hostsFileWriter = new HostsFileWriter(); @Before @@ -92,10 +86,6 @@ public void setup() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).racks(racks) .hosts(hosts).build(); cluster.waitActive(); - nameNodeRpc = cluster.getNameNodeRpc(); - namesystem = cluster.getNamesystem(); - perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, - FsPermission.getDefault()); refreshDatanodeAdminProperties(); } @@ -186,43 +176,51 @@ private void refreshDatanodeAdminProperties2() expectedDatanodeIDs.add(cluster.getDataNodes().get(5).getDatanodeId()); } + private void createFileAndWaitForReplication(final Path path, + final long fileLen) + throws Exception { + DFSTestUtil.createFile(cluster.getFileSystem(), path, fileLen, + REPLICATION_FACTOR, 1000L); + DFSTestUtil.waitForReplication(cluster.getFileSystem(), path, + REPLICATION_FACTOR, WAIT_TIMEOUT_MS); + } + @Test public void testPlacement() throws Exception { - final long fileSize = DEFAULT_BLOCK_SIZE * 5; - final String testFile = new String("/testfile"); + final long fileSize = FILE_SIZE; + final String testFile = "/testfile"; final Path path = new Path(testFile); - DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, - REPLICATION_FACTOR, 1000L); + createFileAndWaitForReplication(path, FILE_SIZE); LocatedBlocks locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks( path.toString(), 0, fileSize); for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) { Set locs = new HashSet<>(); for(DatanodeInfo datanodeInfo : block.getLocations()) { - if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) { + if (datanodeInfo.getAdminState() + .equals(DatanodeInfo.AdminStates.NORMAL)) { locs.add(datanodeInfo); } } for (DatanodeID datanodeID : expectedDatanodeIDs) { - assertTrue(locs.contains(datanodeID)); + Assert.assertTrue(locs.contains(datanodeID)); } } } @Test(timeout = 300000) public void testPlacementAfterDecommission() throws Exception { - final long fileSize = DEFAULT_BLOCK_SIZE * 5; - final String testFile = new String("/testfile"); + final long fileSize = FILE_SIZE; + final String testFile = "/testfile-afterdecomm"; final Path path = new Path(testFile); - DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, - REPLICATION_FACTOR, 1000L); + createFileAndWaitForReplication(path, fileSize); // Decommission some nodes and wait until decommissions have finished. 
refreshDatanodeAdminProperties2(); + GenericTestUtils.waitFor(new Supplier() { @Override public Boolean get() { - boolean successful = true; LocatedBlocks locatedBlocks; try { locatedBlocks = @@ -231,32 +229,34 @@ public Boolean get() { } catch (IOException ioe) { return false; } - for(LocatedBlock block : locatedBlocks.getLocatedBlocks()) { + for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) { Set locs = new HashSet<>(); for (DatanodeInfo datanodeInfo : block.getLocations()) { - if (datanodeInfo.getAdminState() == - DatanodeInfo.AdminStates.NORMAL) { + if (datanodeInfo.getAdminState().equals( + DatanodeInfo.AdminStates.NORMAL)) { locs.add(datanodeInfo); } } for (DatanodeID datanodeID : expectedDatanodeIDs) { - successful = successful && locs.contains(datanodeID); + if (!locs.contains(datanodeID)) { + return false; + } } } - return successful; + return true; } - }, 1000, 60000); + }, 1000, WAIT_TIMEOUT_MS); // Verify block placement policy of each block. - LocatedBlocks locatedBlocks; - locatedBlocks = + LocatedBlocks locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks( path.toString(), 0, fileSize); - for(LocatedBlock block : locatedBlocks.getLocatedBlocks()) { - BlockPlacementStatus status = cluster.getNamesystem().getBlockManager(). - getBlockPlacementPolicy().verifyBlockPlacement( - block.getLocations(), REPLICATION_FACTOR); - assertTrue(status.isPlacementPolicySatisfied()); + for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) { + BlockPlacementStatus status = + cluster.getNamesystem().getBlockManager() + .getBlockPlacementPolicy() + .verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR); + Assert.assertTrue(status.isPlacementPolicySatisfied()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java index 261bf8cf6af2e..8d712c11221f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java @@ -37,9 +37,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.LongAccumulator; -import com.google.common.base.Function; -import com.google.common.base.Joiner; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,7 +59,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Static utility functions useful for testing HA. @@ -304,15 +302,11 @@ public static void setFailoverConfigurations(Configuration conf, String logicalN public static

<P extends FailoverProxyProvider<?>> void setFailoverConfigurations(Configuration conf, String logicalName, List<InetSocketAddress> nnAddresses, Class<P> classFPP) { - setFailoverConfigurations(conf, logicalName, - Iterables.transform(nnAddresses, new Function<InetSocketAddress, String>() { - - // transform the inet address to a simple string - @Override - public String apply(InetSocketAddress addr) { - return "hdfs://" + addr.getHostName() + ":" + addr.getPort(); - } - }), classFPP); + final List<String> addresses = new ArrayList<>(); + nnAddresses.forEach( + addr -> addresses.add( + "hdfs://" + addr.getHostName() + ":" + addr.getPort())); + setFailoverConfigurations(conf, logicalName, addresses, classFPP); + } public static <P extends FailoverProxyProvider<?>
    > diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index defa6e53e70d9..e9a8421fe2ee5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -27,7 +27,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -48,7 +48,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; public class TestBootstrapStandby { private static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java index 7739bc78e409b..1e6dae7def73c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java @@ -38,7 +38,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Test BootstrapStandby when QJM is used for shared edits. 
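The HATestUtil hunk above replaces a Guava Iterables.transform call and an anonymous com.google.common.base.Function with a plain forEach lambda. The snippet below is only an equivalent illustration of that rewrite using the JDK stream API; the host names are invented for the example and nothing here is part of the patch.

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

/** Illustrative rewrite: map NameNode addresses to "hdfs://host:port" URIs without Guava. */
public class AddressMapping {
  static List<String> toHdfsUris(List<InetSocketAddress> nnAddresses) {
    return nnAddresses.stream()
        .map(addr -> "hdfs://" + addr.getHostName() + ":" + addr.getPort())
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<InetSocketAddress> addrs = Arrays.asList(
        InetSocketAddress.createUnresolved("nn1.example.com", 8020),
        InetSocketAddress.createUnresolved("nn2.example.com", 8020));
    // Prints [hdfs://nn1.example.com:8020, hdfs://nn2.example.com:8020]
    System.out.println(toHdfsUris(addrs));
  }
}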
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index 96067858c9df0..854027a116497 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collections; import java.util.concurrent.TimeUnit; @@ -30,9 +31,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; @@ -108,7 +112,8 @@ public void testRequeueCall() throws Exception { final int observerIdx = 2; NameNode nn = dfsCluster.getNameNode(observerIdx); int port = nn.getNameNodeAddress().getPort(); - Configuration configuration = dfsCluster.getConfiguration(observerIdx); + Configuration originalConf = dfsCluster.getConfiguration(observerIdx); + Configuration configuration = new Configuration(originalConf); String prefix = CommonConfigurationKeys.IPC_NAMESPACE + "." + port + "."; configuration.set(prefix + CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY, TestRpcScheduler.class.getName()); @@ -125,6 +130,8 @@ public void testRequeueCall() throws Exception { // be triggered and client should retry active NN. dfs.getFileStatus(testPath); assertSentTo(0); + // reset the original call queue + NameNodeAdapter.getRpcServer(nn).refreshCallQueue(originalConf); } @Test @@ -194,7 +201,7 @@ private void testMsync(boolean autoMsync, long autoMsyncPeriodMs) // Therefore, the subsequent getFileStatus call should succeed. 
if (!autoMsync) { // If not testing auto-msync, perform an explicit one here - dfs2.getClient().msync(); + dfs2.msync(); } else if (autoMsyncPeriodMs > 0) { Thread.sleep(autoMsyncPeriodMs); } @@ -383,6 +390,35 @@ public void testRequestFromNonObserverProxyProvider() throws Exception { } } + @Test(timeout=10000) + public void testMsyncFileContext() throws Exception { + NameNode nn0 = dfsCluster.getNameNode(0); + NameNode nn2 = dfsCluster.getNameNode(2); + HAServiceStatus st = nn0.getRpcServer().getServiceStatus(); + assertEquals("nn0 is not active", HAServiceState.ACTIVE, st.getState()); + st = nn2.getRpcServer().getServiceStatus(); + assertEquals("nn2 is not observer", HAServiceState.OBSERVER, st.getState()); + + FileContext fc = FileContext.getFileContext(conf); + // initialize observer proxy for FileContext + fc.getFsStatus(testPath); + + Path p = new Path(testPath, "testMsyncFileContext"); + fc.mkdir(p, FsPermission.getDefault(), true); + fc.msync(); + dfsCluster.rollEditLogAndTail(0); + LOG.info("State id active = {}, Stat id observer = {}", + nn0.getNamesystem().getFSImage().getLastAppliedOrWrittenTxId(), + nn2.getNamesystem().getFSImage().getLastAppliedOrWrittenTxId()); + try { + // if getFileStatus is taking too long due to server requeueing + // the test will time out + fc.getFileStatus(p); + } catch (FileNotFoundException e) { + fail("File should exist on Observer after msync"); + } + } + private void assertSentTo(int nnIdx) throws IOException { assertTrue("Request was not sent to the expected namenode " + nnIdx, HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java index c470cc6f1d52e..6ad237bef3861 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java @@ -53,7 +53,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * Tests for upgrading with HA enabled. 
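The testMsyncFileContext addition above exercises the read-your-writes guarantee of Observer reads: a client that just wrote through the Active calls msync() so its state ID catches up before it reads from an Observer. The sketch below restates that call sequence outside the test harness; it assumes conf already points at an HA nameservice with an observer-aware proxy provider configured, and it is an illustration rather than code from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

/** Illustrative msync usage with FileContext, mirroring the test above. */
public class ObserverReadAfterMsync {
  static void mkdirThenRead(Configuration conf, Path dir) throws Exception {
    FileContext fc = FileContext.getFileContext(conf);
    // The first call initializes the observer-aware proxy, as in the test.
    fc.getFsStatus(dir);
    fc.mkdir(dir, FsPermission.getDefault(), true);
    // Bring the client's state ID up to date before reading from an Observer.
    fc.msync();
    fc.getFileStatus(dir);
  }
}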
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java index 625563a914bc8..ebd556e0c21a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java @@ -29,8 +29,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java index ee00da419cae6..cf2674682d2f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java @@ -36,7 +36,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 636039d6f7275..2ef48a31f9dc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index 42b9660c6111a..38e7df5f392cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -62,7 +62,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.mockito.Mockito; @RunWith(Parameterized.class) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java index 4387a3372bccb..bd4cb1fa2d80b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java @@ -44,8 +44,8 @@ import org.junit.Test; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test cases for the handling of edit logs during failover diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java index 9506a2f399a5d..0a0f8ebbf2a3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java @@ -45,7 +45,7 @@ import org.apache.hadoop.util.ExitUtil.ExitException; import org.junit.Test; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; public class TestFailureOfSharedDir { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java index 58efc0fa2e34d..31fcb14e27b5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java @@ -61,7 +61,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @RunWith(Parameterized.class) public class TestFailureToReadEdits { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java index 1fccb3c51fd29..d835e230c6f73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java @@ -60,7 +60,8 @@ public void testMultipleAppendsDuringCatchupTailing() throws Exception { // control the ingest of edits by the standby for this test. 
conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000"); conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1); - + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + false); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .numDataNodes(3).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java index b3833728a973d..969d315dc02dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java @@ -31,7 +31,7 @@ import java.util.Collection; import java.util.List; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index 8de09cf6a97b6..176b981a6a534 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -72,8 +72,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests that exercise safemode in an HA cluster. 
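The TestHAAppend hunk above adds DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY=false next to the existing edit-tailing and log-roll settings, so block placement stays deterministic on a three-DataNode mini cluster while the test controls when the standby ingests edits. The following is a self-contained restatement of that setup, assembled only from the configuration keys shown in the hunk as an illustration, not code from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;

/** Illustrative HA mini-cluster setup using the keys from the TestHAAppend hunk. */
public class HaMiniClusterSetup {
  static MiniDFSCluster start() throws Exception {
    Configuration conf = new Configuration();
    // Slow down edit tailing and disable automatic log rolling so the test
    // decides when the standby catches up.
    conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000");
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1);
    // Ignore DataNode load during placement so all three DataNodes stay usable.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(3)
        .build();
    cluster.waitActive();
    return cluster;
  }
}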
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java index 59c9695060ac2..5622edb3d26d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java index e23bb24ef5606..9cd6c6a1061a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java index 5063acdc8af19..a09bfbacd37be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.test.GenericTestUtils; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java index bf93a379815a9..83dbeeb3a2c1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java @@ -62,7 +62,7 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Test cases regarding pipeline recovery during NN failover. 
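The hunks above (and many others in this patch) swap com.google.common.base.Supplier for java.util.function.Supplier in tests that call GenericTestUtils.waitFor. The fragment below illustrates why the swap is mechanical: both interfaces expose a single get() method, so an existing anonymous class keeps the same shape and can optionally collapse to a lambda. It is an illustration under that assumption; SomeCondition is a hypothetical type introduced only for the example.

import java.util.function.Supplier;

import org.apache.hadoop.test.GenericTestUtils;

/** Illustrative waitFor call sites: anonymous-class form and lambda form. */
public class SupplierMigration {
  /** Hypothetical condition interface used only for this example. */
  interface SomeCondition {
    boolean isDone();
  }

  static void waitUntilDone(final SomeCondition condition) throws Exception {
    // Anonymous-class form, identical in shape to the pre-patch Guava version.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return condition.isDone();
      }
    }, 1000, 60000);

    // Equivalent lambda form enabled by the java.util.function interface.
    GenericTestUtils.waitFor(() -> condition.isDone(), 1000, 60000);
  }
}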
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 1730a662b297e..25b800a19614e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.hdfs.LogVerificationAppender; +import org.apache.log4j.spi.LoggingEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -287,6 +289,49 @@ public void testStandbyAndObserverState() throws Exception { cluster.transitionToStandby(2); } + /** + * Tests that a null FSImage is handled gracefully by the ImageServlet. + * If putImage is called while a NameNode is still starting up, the FSImage + * may not have been initialized yet. See HDFS-15290. + */ + @Test(timeout = 30000) + public void testCheckpointBeforeNameNodeInitializationIsComplete() + throws Exception { + final LogVerificationAppender appender = new LogVerificationAppender(); + final org.apache.log4j.Logger logger = org.apache.log4j.Logger + .getRootLogger(); + logger.addAppender(appender); + + // Transition 2 to observer + cluster.transitionToObserver(2); + doEdits(0, 10); + // After a rollEditLog, Standby(nn1)'s next checkpoint would be + // ahead of observer(nn2). + nns[0].getRpcServer().rollEditLog(); + + NameNode nn2 = nns[2]; + FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null); + + // After standby creating a checkpoint, it will try to push the image to + // active and all observer, updating it's own txid to the most recent. + HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12)); + HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12)); + + NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage); + cluster.transitionToStandby(2); + logger.removeAppender(appender); + + for (LoggingEvent event : appender.getLog()) { + String message = event.getRenderedMessage(); + if (message.contains("PutImage failed") && + message.contains("FSImage has not been set in the NameNode.")) { + //Logs have the expected exception. 
+ return; + } + } + fail("Expected exception not present in logs."); + } + /** * Test for the case when the SBN is configured to checkpoint based * on a time period, but no transactions are happening on the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java index 839407389bac9..4f7b993d76800 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java @@ -42,14 +42,15 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo; +import static org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager.QJM_RPC_MAX_TXNS_KEY; import org.junit.After; import org.junit.Before; import org.junit.Test; -import com.google.common.base.Joiner; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test cases for in progress tailing edit logs by @@ -72,6 +73,8 @@ public void startUp() throws IOException { conf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, true); conf.setInt(DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY, 500); + // Set very samll limit of transactions per a journal rpc call + conf.setInt(QJM_RPC_MAX_TXNS_KEY, 3); HAUtil.setAllowStandbyReads(conf, true); qjmhaCluster = new MiniQJMHACluster.Builder(conf).build(); cluster = qjmhaCluster.getDfsCluster(); @@ -300,6 +303,31 @@ public void testNewStartInProgressTail() throws Exception { waitForFileInfo(nn1, "/test", "/test2", "/test3"); } + /** + * Test that Standby Node tails multiple segments while catching up + * during the transition to Active. 
+ */ + @Test + public void testUndertailingWhileFailover() throws Exception { + cluster.transitionToActive(0); + cluster.waitActive(0); + + String p = "/testFailoverWhileTailingWithoutCache/"; + mkdirs(nn0, p + 0, p + 1, p + 2, p + 3, p + 4); + nn0.getRpcServer().rollEditLog(); // create segment 1 + + mkdirs(nn0, p + 5, p + 6, p + 7, p + 8, p + 9); + nn0.getRpcServer().rollEditLog(); // create segment 2 + + mkdirs(nn0, p + 10, p + 11, p + 12, p + 13, p + 14); + nn0.getRpcServer().rollEditLog(); // create segment 3 + + cluster.transitionToStandby(0); + cluster.transitionToActive(1); + cluster.waitActive(1); + waitForFileInfo(nn1, p + 0, p + 1, p + 14); + } + @Test public void testNonUniformConfig() throws Exception { // Test case where some NNs (in this case the active NN) in the cluster diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java index 04eae6f23ecd1..73f15cf9328df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java @@ -43,7 +43,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * The hotornot.com of unit tests: makes sure that the standby not only diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestAddBlockTailing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java similarity index 61% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestAddBlockTailing.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java index 48c09eda7948c..1462314f01445 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestAddBlockTailing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java @@ -22,9 +22,13 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.EnumSet; +import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -43,17 +47,18 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; /** - * Tests the race condition that IBR and add block may result + * Tests the race condition that IBR and update block may result * in inconsistent block genstamp. 
*/ -public class TestAddBlockTailing { +public class TestUpdateBlockTailing { private static final int BLOCK_SIZE = 8192; - private static final String TEST_DIR = "/TestAddBlockTailing"; + private static final String TEST_DIR = "/TestUpdateBlockTailing"; private static MiniQJMHACluster qjmhaCluster; private static MiniDFSCluster dfsCluster; @@ -87,6 +92,12 @@ public static void shutDownCluster() throws IOException { } } + @Before + public void reset() throws Exception { + dfsCluster.transitionToStandby(1); + dfsCluster.transitionToActive(0); + } + @Test public void testStandbyAddBlockIBRRace() throws Exception { String testFile = TEST_DIR +"/testStandbyAddBlockIBRRace"; @@ -161,4 +172,103 @@ public void testStandbyAddBlockIBRRace() throws Exception { rpc1.delete(testFile, false); } + + @Test + public void testStandbyAppendBlock() throws Exception { + final String testFile = TEST_DIR +"/testStandbyAppendBlock"; + final long fileLen = 1 << 16; + // Create a file + DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short)1, 0); + // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Append block without newBlock flag + try (FSDataOutputStream out = dfs.append(new Path(testFile))) { + final byte[] data = new byte[1 << 16]; + ThreadLocalRandom.current().nextBytes(data); + out.write(data); + } + + // NN1 tails OP_APPEND, OP_SET_GENSTAMP_V2, and OP_UPDATE_BLOCKS + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Remove the testFile + final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer(); + rpc0.delete(testFile, false); + } + + @Test + public void testStandbyAppendNewBlock() throws Exception { + final String testFile = TEST_DIR +"/testStandbyAppendNewBlock"; + final long fileLen = 1 << 16; + // Create a file + DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short)1, 0); + // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Append block with newBlock flag + try (FSDataOutputStream out = dfs.append(new Path(testFile), + EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) { + final byte[] data = new byte[1 << 16]; + ThreadLocalRandom.current().nextBytes(data); + out.write(data); + } + + // NN1 tails OP_APPEND, OP_SET_GENSTAMP_V2, and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Remove the testFile + final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer(); + rpc0.delete(testFile, false); + } + + @Test + public void testStandbyTruncateBlock() throws Exception { + final String testFile = TEST_DIR +"/testStandbyTruncateBlock"; + final long fileLen = 1 << 16; + // Create a file + DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short)1, 0); + // NN1 
tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Truncate block + dfs.truncate(new Path(testFile), fileLen/2); + + // NN1 tails OP_SET_GENSTAMP_V2 and OP_TRUNCATE + fsn0.getEditLog().logSync(); + fsn1.getEditLogTailer().doTailEdits(); + assertEquals("Global Generation stamps on NN0 and " + + "NN1 should be equal", + NameNodeAdapter.getGenerationStamp(fsn0), + NameNodeAdapter.getGenerationStamp(fsn1)); + + // Remove the testFile + final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer(); + rpc0.delete(testFile, false); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index 1eab42a270eaf..349b7ac24112d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -48,7 +48,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Random; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java index d13cc38cb8a62..d57a7344fe024 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics; import org.apache.hadoop.http.HttpRequestLog; import org.apache.hadoop.http.HttpServer2; -import org.apache.hadoop.ipc.ProtobufRpcEngine.Server; +import org.apache.hadoop.ipc.ProtobufRpcEngine2.Server; import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java index ed8738085d3ff..ea9c5e2bb1437 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -55,7 +55,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Tests interaction of ACLs with snapshots. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java index e73704c1c468a..d91fea9cfd29b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java index 80af690c2acad..662957fe7a2b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index fcc3c1b6990fd..3bfe971aef97d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -310,7 +310,46 @@ public void testRenameDirectoryInSnapshot() throws Exception { assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName(), sub3.getName())); } - + + @Test (timeout=60000) + public void testRenameDirectoryAndFileInSnapshot() throws Exception { + final Path sub2 = new Path(sub1, "sub2"); + final Path sub3 = new Path(sub1, "sub3"); + final Path sub2file1 = new Path(sub2, "file1"); + final Path sub2file2 = new Path(sub2, "file2"); + final Path sub3file2 = new Path(sub3, "file2"); + final Path sub3file3 = new Path(sub3, "file3"); + final String sub1snap1 = "sub1snap1"; + final String sub1snap2 = "sub1snap2"; + final String sub1snap3 = "sub1snap3"; + final String sub1snap4 = "sub1snap4"; + hdfs.mkdirs(sub1); + hdfs.mkdirs(sub2); + DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPL, SEED); + SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap1); + hdfs.rename(sub2file1, sub2file2); + SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap2); + + // First rename the sub-directory. 
+ hdfs.rename(sub2, sub3); + SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap3); + hdfs.rename(sub3file2, sub3file3); + SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap4); + hdfs.deleteSnapshot(sub1, sub1snap1); + hdfs.deleteSnapshot(sub1, sub1snap2); + hdfs.deleteSnapshot(sub1, sub1snap3); + // check the internal details + INode sub3file3Inode = fsdir.getINode4Write(sub3file3.toString()); + INodeReference ref = sub3file3Inode + .asReference(); + INodeReference.WithCount withCount = (WithCount) ref + .getReferredINode(); + Assert.assertEquals(withCount.getReferenceCount(), 1); + // Ensure name list is empty for the reference sub3file3Inode + Assert.assertNull(withCount.getLastWithName()); + Assert.assertTrue(sub3file3Inode.isInCurrentState()); + } + /** * After the following steps: *

    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    index d8e53bb2abb5e..0a262f899abf2 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    @@ -513,6 +513,59 @@ public void testDeletionSnapshotMtime() throws Exception {
             newSnapshotStatus.getModificationTime());
       }
     
    +  /**
    +   * HDFS-15446 - ensure that snapshot operations on /.reserved/raw
    +   * paths work and the NN can load the resulting edits.
    +   */
    +  @Test(timeout = 60000)
    +  public void testSnapshotOpsOnReservedPath() throws Exception {
    +    Path dir = new Path("/dir");
    +    Path nestedDir = new Path("/nested/dir");
    +    Path sub = new Path(dir, "sub");
    +    Path subFile = new Path(sub, "file");
    +    Path nestedFile = new Path(nestedDir, "file");
    +    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
    +    DFSTestUtil.createFile(hdfs, nestedFile, BLOCKSIZE, REPLICATION, seed);
    +
    +    hdfs.allowSnapshot(dir);
    +    hdfs.allowSnapshot(nestedDir);
    +    Path reservedDir = new Path("/.reserved/raw/dir");
    +    Path reservedNestedDir = new Path("/.reserved/raw/nested/dir");
    +    hdfs.createSnapshot(reservedDir, "s1");
    +    hdfs.createSnapshot(reservedNestedDir, "s1");
    +    hdfs.renameSnapshot(reservedDir, "s1", "s2");
    +    hdfs.renameSnapshot(reservedNestedDir, "s1", "s2");
    +    hdfs.deleteSnapshot(reservedDir, "s2");
    +    hdfs.deleteSnapshot(reservedNestedDir, "s2");
    +    // The original problem with reserved paths was that the NN could not
    +    // replay the resulting edits, so restart the NN to verify that it
    +    // starts cleanly and no exceptions are raised.
    +    cluster.restartNameNode(true);
    +  }
    +
    +  /**
    +   * HDFS-15446 - ensure that snapshot operations on /.reserved/raw
    +   * paths work and the NN can load the resulting edits. This test is for
    +   * snapshots at the root level.
    +   */
    +  @Test(timeout = 60000)
    +  public void testSnapshotOpsOnRootReservedPath() throws Exception {
    +    Path dir = new Path("/");
    +    Path sub = new Path(dir, "sub");
    +    Path subFile = new Path(sub, "file");
    +    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
    +
    +    hdfs.allowSnapshot(dir);
    +    Path reservedDir = new Path("/.reserved/raw");
    +    hdfs.createSnapshot(reservedDir, "s1");
    +    hdfs.renameSnapshot(reservedDir, "s1", "s2");
    +    hdfs.deleteSnapshot(reservedDir, "s2");
    +    // The original problem with reserved paths was that the NN could not
    +    // replay the resulting edits, so restart the NN to verify that it
    +    // starts cleanly and no exceptions are raised.
    +    cluster.restartNameNode(true);
    +  }
    +
       /**
        * Prepare a list of modifications. A modification may be a file creation,
        * file deletion, or a modification operation such as appending to an existing
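The two added tests drive snapshot administration through the /.reserved/raw
prefix and then restart the NameNode to prove the resulting edits replay. A
minimal client-side sketch of the same call sequence, assuming an existing
DistributedFileSystem handle named fs and an illustrative snapshottable
directory /dir (names not taken from the patch):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ReservedRawSnapshotSketch {
      // Issue the same snapshot operations the tests exercise, but through
      // the /.reserved/raw view of an ordinary snapshottable directory.
      static void run(DistributedFileSystem fs) throws Exception {
        Path dir = new Path("/dir");                 // illustrative directory
        Path raw = new Path("/.reserved/raw/dir");   // same directory, raw view
        fs.allowSnapshot(dir);
        fs.createSnapshot(raw, "s1");
        fs.renameSnapshot(raw, "s1", "s2");
        fs.deleteSnapshot(raw, "s2");
      }
    }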
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
    index 90e499d8a3560..814da03559beb 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
    @@ -128,6 +128,7 @@ public void testSnapshotfileLength() throws Exception {
             hdfs.getFileChecksum(file1snap1), is(snapChksum1));
         try {
           AppendTestUtil.write(out, 0, toAppend);
    +      out.hflush();
           // Test reading from snapshot of file that is open for append
           byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
           assertThat("Wrong data size in snapshot.",
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
    index 1d46e4ee92985..818f56ae3854d 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
    @@ -33,6 +33,7 @@
     import org.apache.hadoop.hdfs.DistributedFileSystem;
     import org.apache.hadoop.hdfs.MiniDFSCluster;
     import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    +import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
     import org.apache.hadoop.hdfs.protocol.SnapshotException;
     import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
     import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    @@ -41,6 +42,7 @@
     import org.apache.hadoop.hdfs.util.ReadOnlyList;
     import org.apache.hadoop.ipc.RemoteException;
     import org.apache.hadoop.test.GenericTestUtils;
    +import org.apache.hadoop.test.LambdaTestUtils;
     import org.junit.After;
     import org.junit.Before;
     import org.junit.Rule;
    @@ -280,4 +282,150 @@ public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
           System.setErr(oldErr);
         }
       }
    +
    +  // Test rename under a snapshottable directory with a namespace quota set.
    +  @Test
    +  public void testQuotaAndRenameWithSnapshot() throws Exception {
    +    String dirr = "/dir2";
    +    Path dir2 = new Path(dirr);
    +    Path fil1 = new Path(dir2, "file1");
    +    hdfs.mkdirs(dir2);
    +    hdfs.setQuota(dir2, 3, 0);
    +    hdfs.allowSnapshot(dir2);
    +    hdfs.create(fil1);
    +    hdfs.createSnapshot(dir2, "snap1");
    +    Path file2 = new Path(dir2, "file2");
    +    hdfs.rename(fil1, file2);
    +    hdfs.getFileStatus(dir2);
    +    Path filex = new Path(dir2, "filex");
    +    // creating another file should exceed the namespace quota
    +    LambdaTestUtils.intercept(NSQuotaExceededException.class,
    +        "The NameSpace quota (directories and files) of "
    +            + "directory /dir2 is exceeded",
    +        () -> hdfs.create(filex));
    +    hdfs.createSnapshot(dir2, "snap2");
    +    // renaming a file should now exceed the namespace quota
    +    Path file3 = new Path(dir2, "file3");
    +    LambdaTestUtils
    +        .intercept(NSQuotaExceededException.class,
    +            "The NameSpace quota (directories and files) of"
    +                + " directory /dir2 is exceeded",
    +            () -> hdfs.rename(file2, file3));
    +  }
    +
    +  // Test rename across directories within a snapshottable root, with a
    +  // quota set on the source directory.
    +  @Test
    +  public void testRenameAcrossDirWithinSnapshot() throws Exception {
    +    // snapshottable directory
    +    String dirr = "/dir";
    +    Path rootDir = new Path(dirr);
    +    hdfs.mkdirs(rootDir);
    +    hdfs.allowSnapshot(rootDir);
    +
    +    // set quota for source directory under snapshottable root directory
    +    Path dir2 = new Path(rootDir, "dir2");
    +    Path fil1 = new Path(dir2, "file1");
    +    hdfs.mkdirs(dir2);
    +    hdfs.setQuota(dir2, 3, 0);
    +    hdfs.create(fil1);
    +    Path file2 = new Path(dir2, "file2");
    +    hdfs.rename(fil1, file2);
    +    Path fil3 = new Path(dir2, "file3");
    +    hdfs.create(fil3);
    +
    +    // destination directory under snapshottable root directory
    +    Path dir1 = new Path(rootDir, "dir1");
    +    Path dir1fil1 = new Path(dir1, "file1");
    +    hdfs.mkdirs(dir1);
    +    hdfs.create(dir1fil1);
    +    Path dir1fil2 = new Path(dir1, "file2");
    +    hdfs.rename(dir1fil1, dir1fil2);
    +
    +    hdfs.createSnapshot(rootDir, "snap1");
    +    Path filex = new Path(dir2, "filex");
    +    // creating another file should exceed the namespace quota
    +    LambdaTestUtils.intercept(NSQuotaExceededException.class,
    +        "The NameSpace quota (directories and files) of "
    +            + "directory /dir/dir2 is exceeded",
    +        () -> hdfs.create(filex));
    +
    +    // Rename across directories within the snapshottable root, with a
    +    // quota set on the source directory
    +    assertTrue(hdfs.rename(fil3, dir1));
    +  }
    +
    +  // Test rename within the same directory under a snapshottable root with
    +  // a quota set.
    +  @Test
    +  public void testRenameInSameDirWithSnapshotableRoot() throws Exception {
    +
    +    // snapshottable root directory
    +    String rootStr = "/dir";
    +    Path rootDir = new Path(rootStr);
    +    hdfs.mkdirs(rootDir);
    +    hdfs.setQuota(rootDir, 3, 0);
    +    hdfs.allowSnapshot(rootDir);
    +
    +    // directory in which the rename will be performed
    +    String dirr = "dir2";
    +    Path dir2 = new Path(rootDir, dirr);
    +    Path fil1 = new Path(dir2, "file1");
    +    hdfs.mkdirs(dir2);
    +    hdfs.create(fil1);
    +    hdfs.createSnapshot(rootDir, "snap1");
    +    Path file2 = new Path(dir2, "file2");
    +    // renaming the file should exceed the namespace quota
    +    LambdaTestUtils
    +        .intercept(NSQuotaExceededException.class,
    +            "The NameSpace quota (directories and files) of"
    +                + " directory /dir is exceeded",
    +            () -> hdfs.rename(fil1, file2));
    +
    +  }
    +
    +  // Test rename from a quota-limited directory under a snapshottable root
    +  // to a directory that is not under any snapshottable root.
    +  @Test
    +  public void testRenameAcrossDirWithSnapshotableSrc() throws Exception {
    +    // snapshottable directory
    +    String dirr = "/dir";
    +    Path rootDir = new Path(dirr);
    +    hdfs.mkdirs(rootDir);
    +    hdfs.allowSnapshot(rootDir);
    +
    +    // set quota for source directory
    +    Path dir2 = new Path(rootDir, "dir2");
    +    Path fil1 = new Path(dir2, "file1");
    +    hdfs.mkdirs(dir2);
    +    hdfs.setQuota(dir2, 3, 0);
    +    hdfs.create(fil1);
    +    Path file2 = new Path(dir2, "file2");
    +    hdfs.rename(fil1, file2);
    +    Path fil3 = new Path(dir2, "file3");
    +    hdfs.create(fil3);
    +
    +    hdfs.createSnapshot(rootDir, "snap1");
    +
    +    // destination directory not under any snapshot
    +    String dirr1 = "/dir1";
    +    Path dir1 = new Path(dirr1);
    +    Path dir1fil1 = new Path(dir1, "file1");
    +    hdfs.mkdirs(dir1);
    +    hdfs.create(dir1fil1);
    +    Path dir1fil2 = new Path(dir1, "file2");
    +    hdfs.rename(dir1fil1, dir1fil2);
    +
    +    Path filex = new Path(dir2, "filex");
    +    // creating another file should exceed the namespace quota on the source
    +    LambdaTestUtils.intercept(NSQuotaExceededException.class,
    +        "The NameSpace quota (directories and files) of "
    +            + "directory /dir/dir2 is exceeded",
    +        () -> hdfs.create(filex));
    +
    +    // Rename across directories: source directory under a snapshottable
    +    // root with a quota set, destination not under any snapshottable root
    +    assertTrue(hdfs.rename(fil3, dir1));
    +  }
    +
     }
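All four new quota tests rely on the same LambdaTestUtils.intercept idiom:
run a lambda and assert that it throws the expected exception with a message
containing a given substring. A condensed sketch of that idiom, with an
illustrative helper name not taken from the patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
    import org.apache.hadoop.test.LambdaTestUtils;

    public class InterceptSketch {
      // Expect the create call to fail with an NSQuotaExceededException
      // whose message contains the given fragment.
      static void expectQuotaFailure(DistributedFileSystem hdfs, Path file)
          throws Exception {
        LambdaTestUtils.intercept(NSQuotaExceededException.class,
            "is exceeded",             // substring expected in the message
            () -> hdfs.create(file));  // operation expected to fail
      }
    }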
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
    index 018a5dc69d3ed..664f459ebae01 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
    @@ -52,7 +52,7 @@
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
     
    -import com.google.common.base.Supplier;
    +import java.util.function.Supplier;
     
     /**
      * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
    index 494ed08dbeab0..f025531269e28 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
    @@ -17,12 +17,16 @@
      */
     package org.apache.hadoop.hdfs.server.namenode.top.window;
     
    +import java.util.HashMap;
     import java.util.List;
    +import java.util.Map;
    +import java.util.Random;
     
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.DFSConfigKeys;
    -import org.apache.log4j.Level;
    -import org.apache.log4j.Logger;
    +import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
    +
    +import org.junit.Assert;
     import org.junit.Before;
     import org.junit.Test;
     
    @@ -56,7 +60,7 @@ public void init() {
       }
     
       @Test
    -  public void testTops() {
    +  public void testTops() throws Exception {
         long time = WINDOW_LEN_MS + BUCKET_LEN * 3 / 2;
         for (int i = 0; i < users.length; i++)
           manager.recordMetric(time, "open", users[i], (i + 1) * 2);
    @@ -66,11 +70,12 @@ public void testTops() {
         time++;
         TopWindow tops = manager.snapshot(time);
     
    -    assertEquals("Unexpected number of ops", 2, tops.getOps().size());
    +    assertEquals("Unexpected number of ops", 3, tops.getOps().size());
    +    assertEquals(TopConf.ALL_CMDS, tops.getOps().get(0).getOpType());
         for (Op op : tops.getOps()) {
           final List topUsers = op.getTopUsers();
           assertEquals("Unexpected number of users", N_TOP_USERS, topUsers.size());
    -      if (op.getOpType() == "open") {
    +      if (op.getOpType().equals("open")) {
             for (int i = 0; i < topUsers.size(); i++) {
               User user = topUsers.get(i);
               assertEquals("Unexpected count for user " + user.getUser(),
    @@ -86,8 +91,9 @@ public void testTops() {
         // move the window forward not to see the "open" results
         time += WINDOW_LEN_MS - 2;
         tops = manager.snapshot(time);
    -    assertEquals("Unexpected number of ops", 1, tops.getOps().size());
    -    final Op op = tops.getOps().get(0);
    +    assertEquals("Unexpected number of ops", 2, tops.getOps().size());
    +    assertEquals(TopConf.ALL_CMDS, tops.getOps().get(0).getOpType());
    +    final Op op = tops.getOps().get(1);
         assertEquals("Should only see close ops", "close", op.getOpType());
         final List topUsers = op.getTopUsers();
         for (int i = 0; i < topUsers.size(); i++) {
    @@ -99,4 +105,158 @@ public void testTops() {
         assertEquals("Unexpected total count for op",
             (1 + users.length) * (users.length / 2), op.getTotalCount());
       }
    +
    +  @Test
    +  public void windowReset() throws Exception {
    +    Configuration config = new Configuration();
    +    config.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, 1);
    +    config.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
    +    int period = 2;
    +    RollingWindowManager rollingWindowManager =
    +        new RollingWindowManager(config, period);
    +    rollingWindowManager.recordMetric(0, "op1", users[0], 3);
    +    checkValues(rollingWindowManager, 0, "op1", 3, 3);
    +    checkValues(rollingWindowManager, period - 1, "op1", 3, 3);
    +    checkValues(rollingWindowManager, period, "op1", 0, 0);
    +  }
    +
    +  @Test
    +  public void testTotal() throws Exception {
    +    Configuration config = new Configuration();
    +    config.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, 1);
    +    config.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
    +    int period = 10;
    +    RollingWindowManager rollingWindowManager =
    +        new RollingWindowManager(config, period);
    +
    +    long t = 0;
    +    rollingWindowManager.recordMetric(t, "op1", users[0], 3);
    +    checkValues(rollingWindowManager, t, "op1", 3, 3);
    +
    +    // both should have a value.
    +    t = (long)(period * .5);
    +    rollingWindowManager.recordMetric(t, "op2", users[0], 4);
    +    checkValues(rollingWindowManager, t, "op1", 3, 7);
    +    checkValues(rollingWindowManager, t, "op2", 4, 7);
    +
    +    // neither should reset.
    +    t = period - 1;
    +    checkValues(rollingWindowManager, t, "op1", 3, 7);
    +    checkValues(rollingWindowManager, t, "op2", 4, 7);
    +
    +    // op1 should reset in its next period, but not op2.
    +    t = period;
    +    rollingWindowManager.recordMetric(10, "op1", users[0], 10);
    +    checkValues(rollingWindowManager, t, "op1", 10, 14);
    +    checkValues(rollingWindowManager, t, "op2", 4, 14);
    +
    +    // neither should reset.
    +    t = (long)(period * 1.25);
    +    rollingWindowManager.recordMetric(t, "op2", users[0], 7);
    +    checkValues(rollingWindowManager, t, "op1", 10, 21);
    +    checkValues(rollingWindowManager, t, "op2", 11, 21);
    +
    +    // op2 should reset.
    +    t = (long)(period * 1.5);
    +    rollingWindowManager.recordMetric(t, "op2", users[0], 13);
    +    checkValues(rollingWindowManager, t, "op1", 10, 23);
    +    checkValues(rollingWindowManager, t, "op2", 13, 23);
    +  }
    +
    +  @Test
    +  public void testWithFuzzing() throws Exception {
    +    Configuration config = new Configuration();
    +    config.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, 1);
    +    config.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
    +    int period = users.length/2;
    +    RollingWindowManager rollingWindowManager =
    +        new RollingWindowManager(config, period);
    +
    +    String[] ops = {"op1", "op2", "op3", "op4"};
    +    Random rand = new Random();
    +    for (int i=0; i < 10000; i++) {
    +      rollingWindowManager.recordMetric(i, ops[rand.nextInt(ops.length)],
    +          users[rand.nextInt(users.length)],
    +          rand.nextInt(100));
    +      TopWindow window = rollingWindowManager.snapshot(i);
    +      checkTotal(window);
    +    }
    +  }
    +
    +  @Test
    +  public void testOpTotal() throws Exception {
    +    int numTopUsers = 2;
    +    Configuration config = new Configuration();
    +    config.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, 1);
    +    config.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, numTopUsers);
    +    int period = users.length/2;
    +    RollingWindowManager rollingWindowManager =
    +        new RollingWindowManager(config, period);
    +
    +    int numOps = 3;
    +    rollingWindowManager.recordMetric(0, "op1", "user1", 10);
    +    rollingWindowManager.recordMetric(0, "op1", "user2", 20);
    +    rollingWindowManager.recordMetric(0, "op1", "user3", 30);
    +
    +    rollingWindowManager.recordMetric(0, "op2", "user1", 1);
    +    rollingWindowManager.recordMetric(0, "op2", "user4", 40);
    +    rollingWindowManager.recordMetric(0, "op2", "user5", 50);
    +
    +    rollingWindowManager.recordMetric(0, "op3", "user6", 1);
    +    rollingWindowManager.recordMetric(0, "op3", "user7", 11);
    +    rollingWindowManager.recordMetric(0, "op3", "user8", 1);
    +
    +    TopWindow window = rollingWindowManager.snapshot(0);
    +    Assert.assertEquals(numOps + 1, window.getOps().size());
    +
    +    Op allOp = window.getOps().get(0);
    +    Assert.assertEquals(TopConf.ALL_CMDS, allOp.getOpType());
    +    List<User> topUsers = allOp.getTopUsers();
    +    Assert.assertEquals(numTopUsers * numOps, topUsers.size());
    +    // ensure all the top users for each op are present in the total op.
    +    for (int i = 1; i < numOps; i++) {
    +      Assert.assertTrue(
    +          topUsers.containsAll(window.getOps().get(i).getTopUsers()));
    +    }
    +  }
    +
    +  private void checkValues(RollingWindowManager rwManager, long time,
    +      String opType, long value, long expectedTotal) throws Exception {
    +    TopWindow window = rwManager.snapshot(time);
    +    for (Op windowOp : window.getOps()) {
    +      if (opType.equals(windowOp.getOpType())) {
    +        assertEquals(value, windowOp.getTotalCount());
    +        break;
    +      }
    +    }
    +    assertEquals(expectedTotal, checkTotal(window));
    +  }
    +
    +  private long checkTotal(TopWindow window) throws Exception {
    +    long allOpTotal = 0;
    +    long computedOpTotal = 0;
    +
    +    Map<String, User> userOpTally = new HashMap<>();
    +    for (String user : users) {
    +      userOpTally.put(user, new User(user, 0));
    +    }
    +    for (Op windowOp : window.getOps()) {
    +      int multiplier;
    +      if (TopConf.ALL_CMDS.equals(windowOp.getOpType())) {
    +        multiplier = -1;
    +        allOpTotal += windowOp.getTotalCount();
    +      } else {
    +        multiplier = 1;
    +        computedOpTotal += windowOp.getTotalCount();
    +      }
    +      for (User user : windowOp.getAllUsers()) {
    +        userOpTally.get(user.getUser()).add((int)(multiplier*user.getCount()));
    +      }
    +    }
    +    assertEquals(allOpTotal, computedOpTotal);
    +    for (String user : userOpTally.keySet()) {
    +      assertEquals(0, userOpTally.get(user).getCount());
    +    }
    +    return computedOpTotal;
    +  }
     }
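The checkTotal helper above enforces one invariant: the synthetic
TopConf.ALL_CMDS entry of a TopWindow must total exactly the sum of all
concrete op counts (and the per-user tallies must balance the same way). A
condensed restatement of the op-level half of that check, assuming the nested
TopWindow/Op types exposed by RollingWindowManager (import paths assumed):

    import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
    import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
    import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;

    final class AllCmdsInvariant {
      // The ALL_CMDS pseudo-op must account for exactly the sum of the
      // totals reported by every concrete op in the same snapshot.
      static boolean holds(TopWindow window) {
        long allOpTotal = 0;
        long perOpSum = 0;
        for (Op op : window.getOps()) {
          if (TopConf.ALL_CMDS.equals(op.getOpType())) {
            allOpTotal += op.getTotalCount();
          } else {
            perOpSum += op.getTotalCount();
          }
        }
        return allOpTotal == perOpSum;
      }
    }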
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
    index 8b9e7ce710c2b..e3b775aa7faef 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
    @@ -98,7 +98,7 @@
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
     
    -import com.google.common.base.Supplier;
    +import java.util.function.Supplier;
     
     /**
      * Tests the external sps service plugins.
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
    index b2da6a2fca386..c9d6db00bb828 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
    @@ -87,9 +87,9 @@
     import org.mockito.invocation.InvocationOnMock;
     import org.mockito.stubbing.Answer;
     
    -import com.google.common.base.Preconditions;
    -import com.google.common.base.Supplier;
    -import com.google.common.collect.HashMultimap;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
    +import java.util.function.Supplier;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap;
     
     @NotThreadSafe
     public class TestShortCircuitCache {
    @@ -431,7 +431,7 @@ public void testAllocShm() throws Exception {
         cluster.waitActive();
         DistributedFileSystem fs = cluster.getFileSystem();
         final ShortCircuitCache cache =
    -        fs.getClient().getClientContext().getShortCircuitCache();
    +        fs.getClient().getClientContext().getShortCircuitCache(0);
         cache.getDfsClientShmManager().visit(new Visitor() {
           @Override
           public void visit(HashMap info)
    @@ -501,7 +501,7 @@ public void testShmBasedStaleness() throws Exception {
         cluster.waitActive();
         DistributedFileSystem fs = cluster.getFileSystem();
         final ShortCircuitCache cache =
    -        fs.getClient().getClientContext().getShortCircuitCache();
    +        fs.getClient().getClientContext().getShortCircuitCache(0);
         String TEST_FILE = "/test_file";
         final int TEST_FILE_LEN = 8193;
         final int SEED = 0xFADED;
    @@ -565,7 +565,7 @@ public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
         cluster.waitActive();
         DistributedFileSystem fs = cluster.getFileSystem();
         final ShortCircuitCache cache =
    -        fs.getClient().getClientContext().getShortCircuitCache();
    +        fs.getClient().getClientContext().getShortCircuitCache(0);
         cache.getDfsClientShmManager().visit(new Visitor() {
           @Override
           public void visit(HashMap info)
    @@ -877,19 +877,20 @@ public void testRequestFileDescriptorsWhenULimit() throws Exception {
                 return peerCache;
               });
     
    -      Mockito.when(clientContext.getShortCircuitCache()).thenAnswer(
    +      Mockito.when(clientContext.getShortCircuitCache(
    +          blk.getBlock().getBlockId())).thenAnswer(
               (Answer) shortCircuitCacheCall -> {
    -            ShortCircuitCache cache = Mockito.mock(ShortCircuitCache.class);
    -            Mockito.when(cache.allocShmSlot(
    +              ShortCircuitCache cache = Mockito.mock(ShortCircuitCache.class);
    +              Mockito.when(cache.allocShmSlot(
                     Mockito.any(DatanodeInfo.class),
                     Mockito.any(DomainPeer.class),
                     Mockito.any(MutableBoolean.class),
                     Mockito.any(ExtendedBlockId.class),
                     Mockito.anyString()))
    -                .thenAnswer((Answer) call -> null);
    +                  .thenAnswer((Answer) call -> null);
     
    -            return cache;
    -          }
    +              return cache;
    +            }
           );
     
           DatanodeInfo[] nodes = blk.getLocations();
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
    index 366e07c15e716..40af7c57a61a4 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
    @@ -21,12 +21,13 @@
     import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
     import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
     import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
    +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
     import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
     import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY;
     import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
     
    -import com.google.common.base.Supplier;
    -import com.google.common.collect.Lists;
    +import java.util.function.Supplier;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
     
     import org.apache.commons.io.FileUtils;
     import org.apache.commons.text.TextStringBuilder;
    @@ -421,11 +422,12 @@ public void testNameNodeGetReconfigurableProperties() throws IOException {
         final List outs = Lists.newArrayList();
         final List errs = Lists.newArrayList();
         getReconfigurableProperties("namenode", address, outs, errs);
    -    assertEquals(12, outs.size());
    +    assertEquals(13, outs.size());
         assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(1));
         assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(2));
         assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(3));
    -    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(4));
    +    assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(4));
    +    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(5));
         assertEquals(errs.size(), 0);
       }
     
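The assertion above pins each reconfigurable key to a position, so every
future addition (such as DFS_IMAGE_PARALLEL_LOAD_KEY here) shifts all later
indices. If that brittleness is a concern, an order-insensitive variant is a
small change; a sketch with an illustrative helper name, not part of the
patch:

    import static org.junit.Assert.assertTrue;
    import java.util.List;

    final class ReconfigAssertSketch {
      // Verify the expected keys are present without depending on their
      // position in the command output.
      static void assertContainsKeys(List<String> outs, String... keys) {
        for (String key : keys) {
          assertTrue("Missing reconfigurable property: " + key,
              outs.contains(key));
        }
      }
    }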
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
    index 627ea0710df58..6b8657ccce3b4 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
    @@ -20,7 +20,7 @@
     import java.io.ByteArrayOutputStream;
     import java.io.PrintStream;
     
    -import com.google.common.base.Charsets;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
     
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.CommonConfigurationKeys;
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
    index db827bf0c3853..0086134d78817 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
    @@ -48,8 +48,8 @@
     import org.mockito.ArgumentCaptor;
     import org.mockito.Mockito;
     
    -import com.google.common.base.Charsets;
    -import com.google.common.base.Joiner;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
     
     public class TestDFSHAAdmin {
       private static final Logger LOG =
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
    index fc569d0aa7dba..aa048f865c2de 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
    @@ -43,9 +43,9 @@
     import org.junit.Before;
     import org.junit.Test;
     
    -import com.google.common.base.Charsets;
    -import com.google.common.base.Joiner;
    -import com.google.common.io.Files;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
    +import org.apache.hadoop.thirdparty.com.google.common.io.Files;
     
     /**
      * Tests for HAAdmin command with {@link MiniDFSCluster} set up in HA mode.
    @@ -189,13 +189,13 @@ public void testFencer() throws Exception {
         tmpFile.deleteOnExit();
         if (Shell.WINDOWS) {
           conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
    -          "shell(echo %target_nameserviceid%.%target_namenodeid% " +
    -              "%target_port% %dfs_ha_namenode_id% > " +
    +          "shell(echo %source_nameserviceid%.%source_namenodeid% " +
    +              "%source_port% %dfs_ha_namenode_id% > " +
                   tmpFile.getAbsolutePath() + ")");
         } else {
           conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
    -          "shell(echo -n $target_nameserviceid.$target_namenodeid " +
    -          "$target_port $dfs_ha_namenode_id > " +
    +          "shell(echo -n $source_nameserviceid.$source_namenodeid " +
    +          "$source_port $dfs_ha_namenode_id > " +
               tmpFile.getAbsolutePath() + ")");
         }
     
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
    index 6e8819641143b..0a7a87ca88f2a 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
    @@ -17,6 +17,7 @@
      */
     package org.apache.hadoop.hdfs.tools;
     
    +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
     import static org.junit.Assert.assertEquals;
     import static org.junit.Assert.assertTrue;
     
    @@ -46,6 +47,7 @@
     import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
     import org.apache.hadoop.net.ServerSocketUtil;
     import org.apache.hadoop.security.AccessControlException;
    +import org.apache.hadoop.security.alias.CredentialProviderFactory;
     import org.apache.hadoop.test.GenericTestUtils;
     import org.apache.hadoop.test.LambdaTestUtils;
     import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
    @@ -54,9 +56,11 @@
     import org.junit.Before;
     import org.junit.Test;
     
    -import com.google.common.base.Supplier;
    +import java.util.function.Supplier;
     
     public class TestDFSZKFailoverController extends ClientBaseWithFixes {
    +  private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1";
    +  private static final String WILDCARD_ADDRESS = "0.0.0.0";
       private Configuration conf;
       private MiniDFSCluster cluster;
       private TestContext ctx;
    @@ -90,14 +94,16 @@ public void setup() throws Exception {
             ServerSocketUtil.getPort(10023, 100));
         conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
             ServerSocketUtil.getPort(10024, 100));
    +  }
     
    +  private void startCluster() throws Exception {
         // prefer non-ephemeral port to avoid port collision on restartNameNode
         MiniDFSNNTopology topology = new MiniDFSNNTopology()
    -    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
    -        .addNN(new MiniDFSNNTopology.NNConf("nn1")
    -            .setIpcPort(ServerSocketUtil.getPort(10021, 100)))
    -        .addNN(new MiniDFSNNTopology.NNConf("nn2")
    -            .setIpcPort(ServerSocketUtil.getPort(10022, 100))));
    +        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
    +            .addNN(new MiniDFSNNTopology.NNConf("nn1")
    +                .setIpcPort(ServerSocketUtil.getPort(10021, 100)))
    +            .addNN(new MiniDFSNNTopology.NNConf("nn2")
    +                .setIpcPort(ServerSocketUtil.getPort(10022, 100))));
         cluster = new MiniDFSCluster.Builder(conf)
             .nnTopology(topology)
             .numDataNodes(0)
    @@ -110,16 +116,16 @@ public void setup() throws Exception {
     
         thr1.start();
         waitForHAState(0, HAServiceState.ACTIVE);
    -    
    +
         ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
         thr2.start();
    -    
    +
         // Wait for the ZKFCs to fully start up
         ZKFCTestUtil.waitForHealthState(thr1.zkfc,
             HealthMonitor.State.SERVICE_HEALTHY, ctx);
         ZKFCTestUtil.waitForHealthState(thr2.zkfc,
             HealthMonitor.State.SERVICE_HEALTHY, ctx);
    -    
    +
         fs = HATestUtil.configureFailoverFs(cluster, conf);
       }
       
    @@ -144,11 +150,26 @@ public void shutdown() throws Exception {
         }
       }
     
    +  /**
    +   * Ensure the cluster simply starts with an HDFS JCEKS credential provider
    +   * configured. HDFS-14013.
    +   */
    +  @Test(timeout=60000)
    +  public void testZFFCStartsWithCredentialProviderReferencingHDFS()
    +      throws Exception{
    +    // Create a provider path on HDFS
    +    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
    +        "jceks://hdfs/tmp/test.jceks");
    +    // Expect the cluster to come up cleanly with this configuration.
    +    startCluster();
    +  }
    +
       /**
        * Test that thread dump is captured after NN state changes.
        */
       @Test(timeout=60000)
       public void testThreadDumpCaptureAfterNNStateChange() throws Exception {
    +    startCluster();
         MockNameNodeResourceChecker mockResourceChecker =
             new MockNameNodeResourceChecker(conf);
         mockResourceChecker.setResourcesAvailable(false);
    @@ -166,6 +187,7 @@ public void testThreadDumpCaptureAfterNNStateChange() throws Exception {
        */
       @Test(timeout=60000)
       public void testFailoverAndBackOnNNShutdown() throws Exception {
    +    startCluster();
         Path p1 = new Path("/dir1");
         Path p2 = new Path("/dir2");
     
    @@ -198,6 +220,7 @@ public void testFailoverAndBackOnNNShutdown() throws Exception {
       
       @Test(timeout=30000)
       public void testManualFailover() throws Exception {
    +    startCluster();
         thr2.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
         waitForHAState(0, HAServiceState.STANDBY);
         waitForHAState(1, HAServiceState.ACTIVE);
    @@ -207,6 +230,29 @@ public void testManualFailover() throws Exception {
         waitForHAState(1, HAServiceState.STANDBY);
       }
     
    +  @Test(timeout=30000)
    +  public void testWithoutBindAddressSet() throws Exception {
    +    startCluster();
    +    DFSZKFailoverController zkfc = DFSZKFailoverController.create(
    +        conf);
    +
    +    assertEquals("Bind address not expected to be wildcard by default.",
    +        zkfc.getRpcAddressToBindTo().getHostString(),
    +        LOCALHOST_SERVER_ADDRESS);
    +  }
    +
    +  @Test(timeout=30000)
    +  public void testWithBindAddressSet() throws Exception {
    +    startCluster();
    +    conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
    +    DFSZKFailoverController zkfc = DFSZKFailoverController.create(
    +        conf);
    +    String addr = zkfc.getRpcAddressToBindTo().getHostString();
    +
    +    assertEquals("Bind address " + addr + " is not wildcard.",
    +        addr, WILDCARD_ADDRESS);
    +  }
    +
       /**
        * Tests that a Namenode in Observer state rejects any state transition
        * request from ZKFC, as a result of namenode's participation in the ZK
    @@ -215,6 +261,7 @@ public void testManualFailover() throws Exception {
        */
       @Test
       public void testObserverRejectZkfcCall() throws Exception {
    +    startCluster();
         NamenodeProtocols nn1 = cluster.getNameNode(1).getRpcServer();
         nn1.transitionToObserver(
             new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
    @@ -227,6 +274,7 @@ public void testObserverRejectZkfcCall() throws Exception {
     
       @Test(timeout=30000)
       public void testManualFailoverWithDFSHAAdmin() throws Exception {
    +    startCluster();
         DFSHAAdmin tool = new DFSHAAdmin();
         tool.setConf(conf);
         assertEquals(0, 
    @@ -255,6 +303,7 @@ public void testManualFailoverWithDFSHAAdmin() throws Exception {
     
       @Test(timeout=30000)
       public void testElectionOnObserver() throws Exception{
    +    startCluster();
         InputStream inOriginial = System.in;
         try {
           DFSHAAdmin tool = new DFSHAAdmin();
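
The TestDFSZKFailoverController changes above defer cluster startup into startCluster() and add coverage for two configuration-driven behaviours: a JCEKS credential provider stored on HDFS itself (HDFS-14013) and an explicit service-RPC bind host. The sketch below is only an illustration of those two settings on a plain Configuration object; the provider path and wildcard address mirror the test values, and the string keys are the standard Hadoop keys behind CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH and DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY.

    import org.apache.hadoop.conf.Configuration;

    // Sketch only: the two settings the new ZKFC tests exercise.
    public class ZkfcConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // A credential provider stored inside HDFS; ZKFC startup must not
        // try to resolve it before the NameNodes are running (HDFS-14013).
        conf.set("hadoop.security.credential.provider.path",
            "jceks://hdfs/tmp/test.jceks");
        // Bind the NameNode service RPC endpoint to the wildcard address,
        // which is what testWithBindAddressSet expects the ZKFC to pick up.
        conf.set("dfs.namenode.service.rpc-bind-host", "0.0.0.0");
        System.out.println(conf.get("dfs.namenode.service.rpc-bind-host"));
      }
    }
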
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
    index 13fee1f3938da..82ea34f2e0f20 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
    @@ -58,7 +58,7 @@
     import org.apache.hadoop.util.ToolRunner;
     import org.junit.Test;
     
    -import com.google.common.base.Joiner;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
     
     /**
      * Test for {@link GetConf}
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
    new file mode 100644
    index 0000000000000..161d072489a81
    --- /dev/null
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
    @@ -0,0 +1,291 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hdfs.tools;
    +
    +import static org.hamcrest.CoreMatchers.containsString;
    +import static org.junit.Assert.assertEquals;
    +import static org.junit.Assert.assertThat;
    +
    +import java.io.ByteArrayOutputStream;
    +import java.io.File;
    +import java.io.IOException;
    +import java.io.PrintStream;
    +import java.net.URI;
    +import java.net.URISyntaxException;
    +import java.util.List;
    +import java.util.Scanner;
    +
    +import org.apache.hadoop.conf.Configuration;
    +import org.apache.hadoop.fs.CommonConfigurationKeys;
    +import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    +import org.apache.hadoop.fs.FileSystem;
    +import org.apache.hadoop.fs.FsConstants;
    +import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
    +import org.apache.hadoop.fs.viewfs.ViewFsTestSetup;
    +import org.apache.hadoop.hdfs.DistributedFileSystem;
    +import org.apache.hadoop.hdfs.MiniDFSCluster;
    +import org.apache.hadoop.test.PathUtils;
    +import org.apache.hadoop.util.ToolRunner;
    +import org.junit.After;
    +import org.junit.Assert;
    +import org.junit.Before;
    +import org.junit.Test;
    +
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
    +
    +/**
    + * Tests DFSAdmin with ViewFileSystemOverloadScheme and configured mount links.
    + */
    +public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
    +  private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl";
    +  private static final String HDFS_SCHEME = "hdfs";
    +  private Configuration conf = null;
    +  private MiniDFSCluster cluster = null;
    +  private URI defaultFSURI;
    +  private File localTargetDir;
    +  private static final String TEST_ROOT_DIR = PathUtils
    +      .getTestDirName(TestViewFileSystemOverloadSchemeWithDFSAdmin.class);
    +  private static final String HDFS_USER_FOLDER = "/HDFSUser";
    +  private static final String LOCAL_FOLDER = "/local";
    +  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
    +  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
    +  private static final PrintStream OLD_OUT = System.out;
    +  private static final PrintStream OLD_ERR = System.err;
    +
    +  /**
    +   * Sets up the configurations and starts the MiniDFSCluster.
    +   */
    +  @Before
    +  public void startCluster() throws IOException {
    +    conf = new Configuration();
    +    conf.setInt(
    +        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
    +    conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME),
    +        ViewFileSystemOverloadScheme.class.getName());
    +    conf.set(String.format(
    +        FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN,
    +        HDFS_SCHEME), DistributedFileSystem.class.getName());
    +    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    +    cluster.waitClusterUp();
    +    defaultFSURI =
    +        URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
    +    localTargetDir = new File(TEST_ROOT_DIR, "/root/");
    +    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
    +  }
    +
    +  @After
    +  public void tearDown() throws IOException {
    +    try {
    +      System.out.flush();
    +      System.err.flush();
    +    } finally {
    +      System.setOut(OLD_OUT);
    +      System.setErr(OLD_ERR);
    +    }
    +    if (cluster != null) {
    +      FileSystem.closeAll();
    +      cluster.shutdown();
    +    }
    +    resetStream();
    +  }
    +
    +  private void redirectStream() {
    +    System.setOut(new PrintStream(out));
    +    System.setErr(new PrintStream(err));
    +  }
    +
    +  private void resetStream() {
    +    out.reset();
    +    err.reset();
    +  }
    +
    +  private static void scanIntoList(final ByteArrayOutputStream baos,
    +      final List list) {
    +    final Scanner scanner = new Scanner(baos.toString());
    +    while (scanner.hasNextLine()) {
    +      list.add(scanner.nextLine());
    +    }
    +    scanner.close();
    +  }
    +
    +  private void assertErrMsg(String errorMsg, int line) {
    +    final List errList = Lists.newArrayList();
    +    scanIntoList(err, errList);
    +    assertThat(errList.get(line), containsString(errorMsg));
    +  }
    +
    +  private void assertOutMsg(String outMsg, int line) {
    +    final List errList = Lists.newArrayList();
    +    scanIntoList(out, errList);
    +    assertThat(errList.get(line), containsString(outMsg));
    +  }
    +
    +  /**
    +   * Adds the given mount links to the config. Each entry in sources is a mount
    +   * link source, and the entry at the same index in targets is its target URI.
    +   */
    +  void addMountLinks(String mountTable, String[] sources, String[] targets,
    +      Configuration config) throws IOException, URISyntaxException {
    +    ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config);
    +  }
    +
    +  /**
    +   * Tests savenamespace with ViewFSOverloadScheme by specifying -fs option.
    +   */
    +  @Test
    +  public void testSaveNameSpace() throws Exception {
    +    final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
    +    addMountLinks(defaultFSURI.getHost(),
    +        new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER },
    +        new String[] {hdfsTargetPath.toUri().toString(),
    +            localTargetDir.toURI().toString() },
    +        conf);
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    int ret = ToolRunner.run(dfsAdmin,
    +        new String[] {"-fs", defaultFSURI.toString(), "-safemode", "enter" });
    +    assertEquals(0, ret);
    +    redirectStream();
    +    ret = ToolRunner.run(dfsAdmin,
    +        new String[] {"-fs", defaultFSURI.toString(), "-saveNamespace" });
    +    assertEquals(0, ret);
    +    assertOutMsg("Save namespace successful", 0);
    +    ret = ToolRunner.run(dfsAdmin,
    +        new String[] {"-fs", defaultFSURI.toString(), "-safemode", "leave" });
    +    assertEquals(0, ret);
    +
    +  }
    +
    +  /**
    +   * Tests savenamespace with ViewFSOverloadScheme, but without -fs option.
    +   */
    +  @Test
    +  public void testSaveNamespaceWithoutSpecifyingFS() throws Exception {
    +    final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
    +    addMountLinks(defaultFSURI.getHost(),
    +        new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER },
    +        new String[] {hdfsTargetPath.toUri().toString(),
    +            localTargetDir.toURI().toString() },
    +        conf);
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    int ret = ToolRunner.run(dfsAdmin, new String[] {"-safemode", "enter" });
    +    assertEquals(0, ret);
    +    redirectStream();
    +    ret = ToolRunner.run(dfsAdmin, new String[] {"-saveNamespace" });
    +    assertOutMsg("Save namespace successful", 0);
    +    assertEquals(0, ret);
    +    ret = ToolRunner.run(dfsAdmin, new String[] {"-safemode", "leave" });
    +    assertEquals(0, ret);
    +  }
    +
    +  /**
    +   * Tests safemode with ViewFSOverloadScheme, but with wrong target fs.
    +   */
    +  @Test
    +  public void testSafeModeWithWrongFS() throws Exception {
    +    final Path hdfsTargetPath =
    +        new Path("hdfs://nonExistent" + HDFS_USER_FOLDER);
    +    addMountLinks(defaultFSURI.getHost(), new String[] {HDFS_USER_FOLDER},
    +        new String[] {hdfsTargetPath.toUri().toString()}, conf);
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    redirectStream();
    +    int ret = ToolRunner.run(dfsAdmin, new String[] {"-safemode", "enter" });
    +    assertEquals(-1, ret);
    +    assertErrMsg("safemode: java.net.UnknownHostException: nonExistent", 0);
    +  }
    +
    +  /**
    +   * Tests safemode with ViewFSOverloadScheme when -fs points to a localfs target.
    +   */
    +  @Test
    +  public void testSafeModeShouldFailOnLocalTargetFS() throws Exception {
    +    addMountLinks(defaultFSURI.getHost(), new String[] {LOCAL_FOLDER },
    +        new String[] {localTargetDir.toURI().toString() }, conf);
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    // ViewFSOverloadScheme URI with a localfs mount point
    +    String uri = defaultFSURI.toString() + LOCAL_FOLDER;
    +    redirectStream();
    +    int ret = ToolRunner.run(dfsAdmin,
    +        new String[] {"-fs", uri, "-safemode", "enter" });
    +    assertEquals(-1, ret);
    +    assertErrMsg("safemode: FileSystem file:/// is not an HDFS file system."
    +        + " The fs class is: org.apache.hadoop.fs.LocalFileSystem", 0);
    +  }
    +
    +  /**
    +   * Tests safemode get with ViewFSOverloadScheme without any mount links
    +   * configured. The ViewFSOverloadScheme should automatically treat the
    +   * initialized fs as the fallback fs.
    +   */
    +  @Test
    +  public void testGetSafemodeWithoutMountLinksConfigured() throws Exception {
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    try {
    +      redirectStream();
    +      int ret = ToolRunner.run(dfsAdmin,
    +          new String[] {"-fs", defaultFSURI.toString(), "-safemode", "get"});
    +      assertOutMsg("Safe mode is OFF", 0);
    +      assertEquals(0, ret);
    +    } finally {
    +      dfsAdmin.close();
    +    }
    +  }
    +
    +  /**
    +   * Tests allowSnapshot and disallowSnapshot with ViewFSOverloadScheme.
    +   */
    +  @Test
    +  public void testAllowAndDisalllowSnapShot() throws Exception {
    +    final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
    +    addMountLinks(defaultFSURI.getHost(),
    +        new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER},
    +        new String[] {hdfsTargetPath.toUri().toString(),
    +            localTargetDir.toURI().toString() },
    +        conf);
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    redirectStream();
    +    int ret = ToolRunner.run(dfsAdmin,
    +        new String[] {"-fs", defaultFSURI.toString(), "-allowSnapshot", "/" });
    +    assertOutMsg("Allowing snapshot on / succeeded", 0);
    +    assertEquals(0, ret);
    +    ret = ToolRunner.run(dfsAdmin, new String[] {"-fs",
    +        defaultFSURI.toString(), "-disallowSnapshot", "/" });
    +    assertOutMsg("Disallowing snapshot on / succeeded", 1);
    +    assertEquals(0, ret);
    +  }
    +
    +  /**
    +   * Tests setBalancerBandwidth with ViewFSOverloadScheme.
    +   */
    +  @Test
    +  public void testSetBalancerBandwidth() throws Exception {
    +    final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
    +    addMountLinks(defaultFSURI.getHost(),
    +        new String[] {HDFS_USER_FOLDER, LOCAL_FOLDER },
    +        new String[] {hdfsTargetPath.toUri().toString(),
    +            localTargetDir.toURI().toString() },
    +        conf);
    +    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    +    redirectStream();
    +    int ret = ToolRunner.run(dfsAdmin,
    +        new String[] {"-fs", defaultFSURI.toString(), "-setBalancerBandwidth",
    +            "1000"});
    +    assertOutMsg("Balancer bandwidth is set to 1000", 0);
    +    assertEquals(0, ret);
    +  }
    +}
    \ No newline at end of file
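
For readers unfamiliar with ViewFileSystemOverloadScheme, the tests above build their mount table through ViewFsTestSetup.addMountLinksToConf. The sketch below shows the rough shape of the resulting configuration using raw property names; the mount-table name "ns1", the target URIs, and the local path are illustrative assumptions rather than values taken from the test.

    import org.apache.hadoop.conf.Configuration;

    // Rough sketch of an overload-scheme configuration: hdfs:// URIs resolve
    // through ViewFileSystemOverloadScheme, whose mount links point at the
    // real DistributedFileSystem and at a local directory.
    public class OverloadSchemeConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.hdfs.impl",
            "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
        conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
            "org.apache.hadoop.hdfs.DistributedFileSystem");
        // Mount links: /HDFSUser -> the backing HDFS, /local -> a local dir.
        conf.set("fs.viewfs.mounttable.ns1.link./HDFSUser", "hdfs://ns1/HDFSUser");
        conf.set("fs.viewfs.mounttable.ns1.link./local", "file:///tmp/local");
        System.out.println(conf.get("fs.viewfs.mounttable.ns1.link./local"));
      }
    }
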
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java
    new file mode 100644
    index 0000000000000..83e49d9069dba
    --- /dev/null
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java
    @@ -0,0 +1,173 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hdfs.tools;
    +
    +import static org.junit.Assert.assertEquals;
    +
    +import java.io.ByteArrayOutputStream;
    +import java.io.File;
    +import java.io.IOException;
    +import java.io.PrintStream;
    +import java.net.URI;
    +import java.net.URISyntaxException;
    +import java.util.List;
    +import java.util.Scanner;
    +
    +import org.apache.hadoop.conf.Configuration;
    +import org.apache.hadoop.fs.CommonConfigurationKeys;
    +import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    +import org.apache.hadoop.fs.FileSystem;
    +import org.apache.hadoop.fs.FsConstants;
    +import org.apache.hadoop.fs.FsShell;
    +import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
    +import org.apache.hadoop.fs.viewfs.ViewFsTestSetup;
    +import org.apache.hadoop.hdfs.DistributedFileSystem;
    +import org.apache.hadoop.hdfs.MiniDFSCluster;
    +import org.apache.hadoop.test.PathUtils;
    +import org.apache.hadoop.util.ToolRunner;
    +import org.junit.After;
    +import org.junit.Assert;
    +import org.junit.Before;
    +import org.junit.Test;
    +
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
    +
    +/**
    + * Tests HDFS commands with ViewFileSystemOverloadScheme and configured mount
    + * links.
    + */
    +public class TestViewFileSystemOverloadSchemeWithFSCommands {
    +  private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl";
    +  private static final String HDFS_SCHEME = "hdfs";
    +  private Configuration conf = null;
    +  private MiniDFSCluster cluster = null;
    +  private URI defaultFSURI;
    +  private File localTargetDir;
    +  private static final String TEST_ROOT_DIR = PathUtils
    +      .getTestDirName(TestViewFileSystemOverloadSchemeWithFSCommands.class);
    +  private static final String HDFS_USER_FOLDER = "/HDFSUser";
    +  private static final String LOCAL_FOLDER = "/local";
    +  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
    +  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
    +  private static final PrintStream OLD_OUT = System.out;
    +  private static final PrintStream OLD_ERR = System.err;
    +
    +  /**
    +   * Sets up the configurations and starts the MiniDFSCluster.
    +   */
    +  @Before
    +  public void startCluster() throws IOException {
    +    conf = new Configuration();
    +    conf.setInt(
    +        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
    +    conf.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME),
    +        ViewFileSystemOverloadScheme.class.getName());
    +    conf.set(String.format(
    +        FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN,
    +        HDFS_SCHEME), DistributedFileSystem.class.getName());
    +    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    +    cluster.waitClusterUp();
    +    defaultFSURI =
    +        URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
    +    localTargetDir = new File(TEST_ROOT_DIR, "/root/");
    +    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
    +  }
    +
    +  @After
    +  public void tearDown() throws IOException {
    +    try {
    +      System.out.flush();
    +      System.err.flush();
    +    } finally {
    +      System.setOut(OLD_OUT);
    +      System.setErr(OLD_ERR);
    +    }
    +    if (cluster != null) {
    +      FileSystem.closeAll();
    +      cluster.shutdown();
    +    }
    +    resetStream();
    +  }
    +
    +  private void redirectStream() {
    +    System.setOut(new PrintStream(out));
    +    System.setErr(new PrintStream(err));
    +  }
    +
    +  private void resetStream() {
    +    out.reset();
    +    err.reset();
    +  }
    +
    +  private static void scanIntoList(final ByteArrayOutputStream baos,
    +      final List list) {
    +    final Scanner scanner = new Scanner(baos.toString());
    +    while (scanner.hasNextLine()) {
    +      list.add(scanner.nextLine());
    +    }
    +    scanner.close();
    +  }
    +
    +  /**
    +   * Adds the given mount links to the config. Each entry in sources is a mount
    +   * link source, and the entry at the same index in targets is its target URI.
    +   */
    +  void addMountLinks(String mountTable, String[] sources, String[] targets,
    +      Configuration config) throws IOException, URISyntaxException {
    +    ViewFsTestSetup.addMountLinksToConf(mountTable, sources, targets, config);
    +  }
    +
    +  /**
    +   * Tests DF with ViewFSOverloadScheme.
    +   */
    +  @Test
    +  public void testDFWithViewFsOverloadScheme() throws Exception {
    +    final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
    +    List mounts = Lists.newArrayList();
    +    mounts.add(HDFS_USER_FOLDER);
    +    mounts.add(LOCAL_FOLDER);
    +    addMountLinks(defaultFSURI.getHost(),
    +        mounts.toArray(new String[mounts.size()]),
    +        new String[] {hdfsTargetPath.toUri().toString(),
    +            localTargetDir.toURI().toString() },
    +        conf);
    +    FsShell fsShell = new FsShell(conf);
    +    try {
    +      redirectStream();
    +      int ret =
    +          ToolRunner.run(fsShell, new String[] {"-fs", defaultFSURI.toString(),
    +              "-df", "-h", defaultFSURI.toString() + "/" });
    +      assertEquals(0, ret);
    +      final List errList = Lists.newArrayList();
    +      scanIntoList(out, errList);
    +      assertEquals(3, errList.size());
    +      for (int i = 1; i < errList.size(); i++) {
    +        String[] lineSplits = errList.get(i).split("\\s+");
    +        String mount = lineSplits[lineSplits.length - 1];
    +        mounts.remove(mount);
    +      }
    +      String msg =
    +          "DF was not calculated on all mounts. The left out mounts are: "
    +              + mounts;
    +      assertEquals(msg, 0, mounts.size());
    +    } finally {
    +      fsShell.close();
    +    }
    +  }
    +}
    \ No newline at end of file
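
The FsShell and DFSAdmin tests above all drive the tool through ToolRunner with an explicit -fs URI, mirroring what a user types on the command line. A minimal sketch of that pattern follows; the hdfs://ns1 URI is a placeholder, not a value from the test, and a real cluster address is needed for the command to succeed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    // Sketch: run "df -h /" against an explicitly named filesystem, the same
    // ToolRunner pattern the tests use for FsShell and DFSAdmin commands.
    public class DfOverMountsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int exit = ToolRunner.run(conf, new FsShell(),
            new String[] {"-fs", "hdfs://ns1", "-df", "-h", "/"});
        System.exit(exit);
      }
    }
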
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
    index 80155691b05c6..226e4861f45f5 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
    @@ -47,7 +47,7 @@
     import org.junit.Test;
     import org.junit.rules.TemporaryFolder;
     
    -import com.google.common.collect.ImmutableSet;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
     
     public class TestOfflineEditsViewer {
       private static final Logger LOG = LoggerFactory
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    index d3f2f594cfb77..6ac12accff79c 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    @@ -17,7 +17,7 @@
      */
     package org.apache.hadoop.hdfs.tools.offlineImageViewer;
     
    -import com.google.common.collect.ImmutableMap;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
     
     import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
     import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
    @@ -135,8 +135,8 @@
     import org.xml.sax.SAXException;
     import org.xml.sax.helpers.DefaultHandler;
     
    -import com.google.common.collect.Lists;
    -import com.google.common.collect.Maps;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
     import org.apache.hadoop.thirdparty.protobuf.ByteString;
     
     public class TestOfflineImageViewer {
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
    index 5ecec2db3ba3b..b41ec9690ce30 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
    @@ -71,8 +71,8 @@
     import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
     import static org.junit.Assert.assertEquals;
     
    -import com.google.common.collect.Lists;
    -import com.google.common.collect.Maps;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
     
     /**
      * Tests OfflineImageViewer if the input fsimage has HDFS ACLs
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
    index c4ae8ce79b55a..bce92ef797da2 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
    @@ -38,7 +38,7 @@
     import org.junit.Test;
     import org.junit.rules.ExpectedException;
     
    -import com.google.common.base.Joiner;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
     
     public class TestAtomicFileOutputStream {
     
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java
    new file mode 100644
    index 0000000000000..6444778f54e60
    --- /dev/null
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java
    @@ -0,0 +1,114 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hadoop.hdfs.util;
    +
    +import org.apache.hadoop.hdfs.server.namenode.AclFeature;
    +import org.junit.Assert;
    +import org.junit.Test;
    +
    +/**
    + * Verify ReferenceCount map in concurrent scenarios.
    + */
    +public class TestReferenceCountMap {
    +  // Number of references each thread adds (or removes) per feature in its loop.
    +  public static final int LOOP_COUNTER = 10000;
    +  // Two shared AclFeature instances referenced from all threads.
    +  private AclFeature aclFeature1 = new AclFeature(new int[]{1});
    +  private AclFeature aclFeature2 = new AclFeature(new int[]{2});
    +
    +  @Test
    +  public void testReferenceCountMap() throws Exception {
    +    ReferenceCountMap countMap = new ReferenceCountMap<>();
    +    countMap.put(aclFeature1);
    +    countMap.put(aclFeature2);
    +    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature1));
    +    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature2));
    +
    +    countMap.put(aclFeature1);
    +    countMap.put(aclFeature2);
    +    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature1));
    +    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature2));
    +
    +    countMap.put(aclFeature1);
    +    Assert.assertEquals(3, countMap.getReferenceCount(aclFeature1));
    +    countMap.put(aclFeature1);
    +    Assert.assertEquals(4, countMap.getReferenceCount(aclFeature1));
    +    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature2));
    +
    +    //Delete operations:
    +    countMap.remove(aclFeature1);
    +    countMap.remove(aclFeature2);
    +    Assert.assertEquals(3, countMap.getReferenceCount(aclFeature1));
    +    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature2));
    +
    +    //Verify unique elements in map
    +    Assert.assertEquals(2, countMap.getUniqueElementsSize());
    +  }
    +
    +  @Test
    +  public void testRefCountMapConcurrently() throws Exception {
    +    ReferenceCountMap countMap = new ReferenceCountMap<>();
    +
    +    PutThread putThread1 = new PutThread(countMap);
    +    putThread1.start();
    +    PutThread putThread2 = new PutThread(countMap);
    +    putThread2.start();
    +    RemoveThread removeThread1 = new RemoveThread(countMap);
    +
    +    putThread1.join();
    +    putThread2.join();
    +    Assert.assertEquals(2 * LOOP_COUNTER,
    +        countMap.getReferenceCount(aclFeature1));
    +    Assert.assertEquals(2 * LOOP_COUNTER,
    +        countMap.getReferenceCount(aclFeature2));
    +
    +    removeThread1.start();
    +    removeThread1.join();
    +    Assert.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature1));
    +    Assert.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2));
    +  }
    +
    +  class PutThread extends Thread {
    +    private ReferenceCountMap referenceCountMap;
    +    PutThread(ReferenceCountMap referenceCountMap) {
    +      this.referenceCountMap = referenceCountMap;
    +    }
    +    @Override
    +    public void run() {
    +      for (int i = 0; i < LOOP_COUNTER; i++) {
    +        referenceCountMap.put(aclFeature1);
    +        referenceCountMap.put(aclFeature2);
    +      }
    +    }
    +  };
    +
    +  class RemoveThread extends Thread {
    +    private ReferenceCountMap referenceCountMap;
    +    RemoveThread(ReferenceCountMap referenceCountMap) {
    +      this.referenceCountMap = referenceCountMap;
    +    }
    +    @Override
    +    public void run() {
    +      for (int i = 0; i < LOOP_COUNTER; i++) {
    +        referenceCountMap.remove(aclFeature1);
    +        referenceCountMap.remove(aclFeature2);
    +      }
    +    }
    +  };
    +}
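
TestReferenceCountMap above pins down the contract of the namenode's ReferenceCountMap under concurrent use. The sketch below is not the Hadoop implementation, only a minimal illustration of that contract with a ConcurrentHashMap of atomic counters: put() bumps a per-key count, remove() drops it, and getUniqueElementsSize() reports distinct keys.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicInteger;

    // Minimal illustration of a reference-counting map; not the Hadoop class.
    public class RefCountSketch<E> {
      private final ConcurrentHashMap<E, AtomicInteger> counts =
          new ConcurrentHashMap<>();

      // Increment (creating the counter on first use) and return the new count.
      public int put(E e) {
        return counts.computeIfAbsent(e, k -> new AtomicInteger()).incrementAndGet();
      }

      // Decrement and return the new count; 0 if the key was never added.
      public int remove(E e) {
        AtomicInteger c = counts.get(e);
        return c == null ? 0 : c.decrementAndGet();
      }

      public int getReferenceCount(E e) {
        AtomicInteger c = counts.get(e);
        return c == null ? 0 : c.get();
      }

      public int getUniqueElementsSize() {
        return counts.size();
      }

      public static void main(String[] args) {
        RefCountSketch<String> map = new RefCountSketch<>();
        map.put("acl1");
        map.put("acl1");
        map.remove("acl1");
        System.out.println(map.getReferenceCount("acl1")); // prints 1
      }
    }
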
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
    index df38e82e540fc..9d8c82c63708a 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
    @@ -18,7 +18,7 @@
     
     package org.apache.hadoop.hdfs.util;
     
    -import com.google.common.base.Preconditions;
    +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
     import org.apache.hadoop.fs.StorageType;
     import org.apache.hadoop.hdfs.DFSTestUtil;
     import org.apache.hadoop.hdfs.StripedFileTestUtil;
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
    index ca256ab3da06f..a4107a5f05c57 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
    @@ -56,7 +56,7 @@
     
     import com.fasterxml.jackson.databind.ObjectMapper;
     import com.fasterxml.jackson.databind.ObjectReader;
    -import com.google.common.collect.Lists;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
     
     public class TestJsonUtil {
     
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
    index 3ee7fcbd61a2a..d02a5fec4d635 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
    @@ -52,9 +52,10 @@
     import java.util.EnumSet;
     import java.util.Iterator;
     import java.util.Map;
    +import java.util.NoSuchElementException;
     import java.util.Random;
     
    -import com.google.common.collect.ImmutableList;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
     import org.apache.commons.io.IOUtils;
     import org.apache.hadoop.fs.QuotaUsage;
     import org.apache.hadoop.test.LambdaTestUtils;
    @@ -399,7 +400,7 @@ public Void run() throws IOException, URISyntaxException {
                 try {
                   it.next();
                   fail("Iterator should error if out of elements.");
    -            } catch (IllegalStateException e) {
    +            } catch (NoSuchElementException e) {
                   // pass
                 }
                 return null;
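
The TestWebHDFS change swaps the expected exception type because the java.util.Iterator contract specifies NoSuchElementException, not IllegalStateException, once iteration is exhausted; the fixed WebHDFS listing iterator now conforms to that. A tiny illustration of the contract:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // Demonstrates the java.util.Iterator contract the test now relies on.
    public class IteratorContractSketch {
      public static void main(String[] args) {
        Iterator<String> it = Arrays.asList("only-element").iterator();
        it.next();                       // consume the single element
        try {
          it.next();                     // out of elements
        } catch (NoSuchElementException expected) {
          System.out.println("exhausted iterators throw NoSuchElementException");
        }
      }
    }
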
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
    index 74c3f046ff09d..5758fe7986099 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
    @@ -29,6 +29,7 @@
     import java.util.Map;
     import java.util.Random;
     import java.util.Set;
    +import java.util.concurrent.TimeUnit;
     
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.DFSTestUtil;
    @@ -56,7 +57,7 @@ public class TestNetworkTopology {
       private DatanodeDescriptor dataNodes[];
     
       @Rule
    -  public Timeout testTimeout = new Timeout(30000);
    +  public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS);
     
       @Before
       public void setupDatanodes() {
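
The TestNetworkTopology tweak above tracks the JUnit 4 API: the Timeout(int) constructor is deprecated in favour of passing an explicit TimeUnit (or using the Timeout.seconds/millis factories). A small sketch of the updated rule, using an illustrative test class:

    import java.util.concurrent.TimeUnit;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;

    // Sketch of the non-deprecated Timeout rule construction used above.
    public class TimeoutRuleSketch {
      @Rule
      public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS);
      // Equivalent: Timeout.seconds(30)

      @Test
      public void finishesQuickly() {
        // completes well inside the 30-second budget
      }
    }
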
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
    index 9fd505fd4a706..5cfd8f6c97fde 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
    @@ -80,8 +80,6 @@ public void initializeMemberVariables() {
             .add("dfs.datanode.non.local.lazy.persist");
         configurationPropsToSkipCompare
             .add("dfs.namenode.tolerate.heartbeat.multiplier");
    -    configurationPropsToSkipCompare
    -        .add("dfs.namenode.stripe.min");
         configurationPropsToSkipCompare
             .add("dfs.namenode.replqueue.threshold-pct");
     
    @@ -121,9 +119,6 @@ public void initializeMemberVariables() {
         // Used oddly by DataNode to create new config String
         xmlPropsToSkipCompare.add("hadoop.hdfs.configuration.version");
     
    -    // Skip comparing in branch-2.  Removed in trunk with HDFS-7985.
    -    xmlPropsToSkipCompare.add("dfs.webhdfs.enabled");
    -
         // Some properties have moved to HdfsClientConfigKeys
         xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
     
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
    index e2f1011e79138..a814035e0f514 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
    @@ -32,8 +32,8 @@
     import org.apache.hadoop.util.ExitUtil.ExitException;
     import org.junit.BeforeClass;
     import org.junit.Test;
    -import com.google.common.collect.ImmutableSet;
    -import com.google.common.io.ByteStreams;
    +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
    +import org.apache.hadoop.thirdparty.com.google.common.io.ByteStreams;
     
     public class TestTools {
     
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/hdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/hdfs.xml
    index 3c9fcccc73846..28721f7574960 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/hdfs.xml
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/hdfs.xml
    @@ -116,4 +116,19 @@
         <value>true</value>
       </property>
     
    +  <property>
    +    <name>fs.contract.supports-hflush</name>
    +    <value>true</value>
    +  </property>
    +
    +  <property>
    +    <name>fs.contract.supports-hsync</name>
    +    <value>true</value>
    +  </property>
    +
    +  <property>
    +    <name>fs.contract.metadata_updated_on_hsync</name>
    +    <value>false</value>
    +  </property>
    +
     </configuration>
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
    index df3b69169dec8..997854dcce7a3 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
    @@ -48,29 +48,5 @@ log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
     log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
     log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
     
    -#
    -# Add a logger for ozone that is separate from the Datanode.
    -#
    -log4j.logger.org.apache.hadoop.ozone=INFO,OZONE,FILE
    -
    -# Do not log into datanode logs. Remove this line to have single log.
    -log4j.additivity.org.apache.hadoop.ozone=false
    -
    -# For development purposes, log both to console and log file.
    -log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
    -log4j.appender.OZONE.Threshold=ALL
    -log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
    -log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) \
    - %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
    -
    -# Real ozone logger that writes to ozone.log
    -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
    -log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
    -log4j.appender.FILE.Threshold=debug
    -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
    -log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
    -  (%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
    -  %m%n
    -
     # Supress KMS error log
     log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
    \ No newline at end of file
    diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
    index ea327c4ffb620..6142a1a566a94 100644
    --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
    +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
    @@ -1060,9 +1060,9 @@
             
               RegexpAcrossOutputComparator
               Found 3 items
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file1
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file2
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file3
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file1
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file2
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file3
             
           
         
    @@ -1081,9 +1081,9 @@
             
               RegexpAcrossOutputComparator
               Found 3 items
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file3
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file2
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file1
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file3
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file2
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/file1
             
           
         
    @@ -1105,10 +1105,10 @@
             
               RegexpAcrossOutputComparator
               Found 4 items
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*120( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data120bytes
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*60( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data60bytes
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*30( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data30bytes
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*15( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data15bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*120( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data120bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*60( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data60bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*30( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data30bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*15( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data15bytes
             
           
         
    @@ -1130,10 +1130,10 @@
             
               RegexpAcrossOutputComparator
               Found 4 items
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*15( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data15bytes
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*30( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data30bytes
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*60( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data60bytes
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*120( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data120bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*15( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data15bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*30( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data30bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*60( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data60bytes
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*120( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/data120bytes
             
           
         
    @@ -1157,11 +1157,11 @@
             
               RegexpAcrossOutputComparator
               Found 5 items
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fifth
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fourth
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/third
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/second
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/first
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fifth
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fourth
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/third
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/second
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/first
             
           
         
    @@ -1185,11 +1185,11 @@
             
               RegexpAcrossOutputComparator
               Found 5 items
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/first
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/second
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/third
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fourth
    --rw-r--r--( )*1( )*[a-zA-z0-9]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fifth
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/first
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/second
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/third
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fourth
    +-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir/fifth
             
           
         
    diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
    index 352db4769b81e..0846bb9daf0ed 100644
    --- a/hadoop-hdfs-project/pom.xml
    +++ b/hadoop-hdfs-project/pom.xml
    @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <parent>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-project</artifactId>
    -    <version>3.3.0-SNAPSHOT</version>
    +    <version>3.3.1-SNAPSHOT</version>
         <relativePath>../hadoop-project</relativePath>
       </parent>
       <artifactId>hadoop-hdfs-project</artifactId>
    -  <version>3.3.0-SNAPSHOT</version>
    +  <version>3.3.1-SNAPSHOT</version>
       <description>Apache Hadoop HDFS Project</description>
       <name>Apache Hadoop HDFS Project</name>
       <packaging>pom</packaging>
    diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
    index 9773ec89ded6c..3e52556a08f0b 100755
    --- a/hadoop-mapreduce-project/bin/mapred
    +++ b/hadoop-mapreduce-project/bin/mapred
    @@ -105,6 +105,8 @@ function mapredcmd_case
         minicluster)
           hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/timelineservice"'/*'
           hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/test"'/*'
    +      junitjar=$(echo "${HADOOP_TOOLS_LIB_JARS_DIR}"/junit-[0-9]*.jar)
    +      hadoop_add_classpath "${junitjar}"
           HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.MiniHadoopClusterManager
         ;;
         *)
    diff --git a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
    index 9b4d8c90f5027..4e459b652b29c 100644
    --- a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
    +++ b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
    @@ -533,5 +533,17 @@
         
         
       
    -   
    +
    +  
    +  
    +    
    +      
    +      
    +    
    +    
    +    
    +  
      
    diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_3.2.2.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_3.2.2.xml
    new file mode 100644
    index 0000000000000..929cd6b55a5be
    --- /dev/null
    +++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_3.2.2.xml
    @@ -0,0 +1,113 @@
    +<!-- JDiff-generated API descriptor for Apache Hadoop MapReduce Common 3.2.2
    +     (113 lines of generated XML; element markup not reproduced here). -->
    diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.2.2.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.2.2.xml
    new file mode 100644
    index 0000000000000..c4e35bf8bd230
    --- /dev/null
    +++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.2.2.xml
    @@ -0,0 +1,28149 @@
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +  
    +  
    +    
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      FileStatus of a given cache file on hdfs
    + @throws IOException]]>
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +      
    +      
    +      
    +    
    +    
    +    DistributedCache is a facility provided by the Map-Reduce
    + framework to cache files (text, archives, jars etc.) needed by applications.
    + 

    + +

    Applications specify the files, via urls (hdfs:// or http://) to be cached + via the {@link org.apache.hadoop.mapred.JobConf}. The + DistributedCache assumes that the files specified via urls are + already present on the {@link FileSystem} at the path specified by the url + and are accessible by every machine in the cluster.

    + +

    The framework will copy the necessary files on to the worker node before + any tasks for the job are executed on that node. Its efficiency stems from + the fact that the files are only copied once per job and the ability to + cache archives which are un-archived on the workers.

    + +

    DistributedCache can be used to distribute simple, read-only + data/text files and/or more complex types such as archives, jars etc. + Archives (zip, tar and tgz/tar.gz files) are un-archived at the worker nodes. + Jars may be optionally added to the classpath of the tasks, a rudimentary + software distribution mechanism. Files have execution permissions. + In older version of Hadoop Map/Reduce users could optionally ask for symlinks + to be created in the working directory of the child task. In the current + version symlinks are always created. If the URL does not have a fragment + the name of the file or directory will be used. If multiple files or + directories map to the same link name, the last one added, will be used. All + others will not even be downloaded.

    + +

    DistributedCache tracks modification timestamps of the cache + files. Clearly the cache files should not be modified by the application + or externally while the job is executing.

    + +

    Here is an illustrative example on how to use the + DistributedCache:

    +

    +     // Setting up the cache for the application
    +
    +     1. Copy the requisite files to the FileSystem:
    +
    +     $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
    +     $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
    +     $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
    +     $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
    +     $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
    +     $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
    +
    +     2. Setup the application's JobConf:
    +
    +     JobConf job = new JobConf();
    +     DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
    +                                   job);
    +     DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
    +     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
    +     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
    +     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
    +     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
    +
    +     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
    +     or {@link org.apache.hadoop.mapred.Reducer}:
    +
    +     public static class MapClass extends MapReduceBase
    +     implements Mapper<K, V, K, V> {
    +
    +       private Path[] localArchives;
    +       private Path[] localFiles;
    +
    +       public void configure(JobConf job) {
    +         // Get the cached archives/files
    +         File f = new File("./map.zip/some/file/in/zip.txt");
    +       }
    +
    +       public void map(K key, V value,
    +                       OutputCollector<K, V> output, Reporter reporter)
    +       throws IOException {
    +         // Use data from the cached archives/files here
    +         // ...
    +         // ...
    +         output.collect(k, v);
    +       }
    +     }
    +
    + 
    + + It is also very common to use the DistributedCache by using + {@link org.apache.hadoop.util.GenericOptionsParser}. + + This class includes methods that should be used by users + (specifically those mentioned in the example above, as well + as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}), + as well as methods intended for use by the MapReduce framework + (e.g., {@link org.apache.hadoop.mapred.JobClient}). + + @see org.apache.hadoop.mapred.JobConf + @see org.apache.hadoop.mapred.JobClient + @see org.apache.hadoop.mapreduce.Job]]> +
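    To make the GenericOptionsParser route concrete, here is a minimal, hedged driver sketch; the class name MyDriver and all paths are illustrative assumptions, not part of the original example. ToolRunner applies GenericOptionsParser to the command line, so files passed with -files/-archives/-libjars end up in the DistributedCache without any explicit addCacheFile calls:
    +     import org.apache.hadoop.conf.Configured;
    +     import org.apache.hadoop.fs.Path;
    +     import org.apache.hadoop.mapred.FileInputFormat;
    +     import org.apache.hadoop.mapred.FileOutputFormat;
    +     import org.apache.hadoop.mapred.JobClient;
    +     import org.apache.hadoop.mapred.JobConf;
    +     import org.apache.hadoop.util.Tool;
    +     import org.apache.hadoop.util.ToolRunner;
    +
    +     public class MyDriver extends Configured implements Tool {
    +       public int run(String[] args) throws Exception {
    +         JobConf job = new JobConf(getConf(), MyDriver.class);
    +         job.setJobName("myjob");
    +         FileInputFormat.setInputPaths(job, new Path(args[0]));
    +         FileOutputFormat.setOutputPath(job, new Path(args[1]));
    +         // Anything passed via -files/-archives/-libjars has already been
    +         // added to the distributed cache by GenericOptionsParser.
    +         JobClient.runJob(job);
    +         return 0;
    +       }
    +
    +       public static void main(String[] args) throws Exception {
    +         System.exit(ToolRunner.run(new MyDriver(), args));
    +       }
    +     }
    +
    +     // Illustrative invocation:
    +     // $ bin/hadoop jar myjob.jar MyDriver \
    +     //     -files /myapp/lookup.dat#lookup.dat -archives /myapp/map.zip in out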
    +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + JobTracker, + as {@link JobTracker.State} + + {@link JobTracker.State} should no longer be used on M/R 2.x. The function + is kept to be compatible with M/R 1.x applications. + + @return the invalid state of the JobTracker.]]> + + + + + + + + + + + + + + ClusterStatus provides clients with information such as: +
      +
    1. + Size of the cluster. +
    2. +
    3. + Name of the trackers. +
    4. +
    5. + Task capacity of the cluster. +
    6. +
    7. + The number of currently running map and reduce tasks. +
    8. +
    9. + State of the JobTracker. +
    10. +
    11. + Details regarding black listed trackers. +
    12. +
    + +

    Clients can query for the latest ClusterStatus, via + {@link JobClient#getClusterStatus()}.

    + + @see JobClient]]> +
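    A small, hedged sketch of querying the items listed above through {@link JobClient#getClusterStatus()}; it assumes a JobConf that points at a live cluster:
    +     JobConf conf = new JobConf();
    +     JobClient client = new JobClient(conf);
    +     ClusterStatus status = client.getClusterStatus();
    +
    +     System.out.println("Task trackers      : " + status.getTaskTrackers());
    +     System.out.println("Map task capacity  : " + status.getMaxMapTasks());
    +     System.out.println("Reduce capacity    : " + status.getMaxReduceTasks());
    +     System.out.println("Running map tasks  : " + status.getMapTasks());
    +     System.out.println("Running reduces    : " + status.getReduceTasks());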
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Counters represent global counters, defined either by the + Map-Reduce framework or applications. Each Counter can be of + any {@link Enum} type.

    + +

    Counters are bunched into {@link Group}s, each comprising of + counters from a particular Enum class.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Group of counters, comprising of counters from a particular + counter {@link Enum} class. + +

    Group handles localization of the class name and the + counter names.

    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + FileInputFormat always returns + true. Implementations that may deal with non-splittable files must + override this method. + + FileInputFormat implementations can override this and return + false to ensure that individual input files are never split-up + so that {@link Mapper}s process entire files. + + @param fs the file system that the file is on + @param filename the file name to check + @return is this file splitable?]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FileInputFormat is the base class for all file-based + InputFormats. This provides a generic implementation of + {@link #getSplits(JobConf, int)}. + + Implementations of FileInputFormat can also override the + {@link #isSplitable(FileSystem, Path)} method to prevent input files + from being split-up in certain situations. Implementations that may + deal with non-splittable files must override this method, since + the default implementation assumes splitting is always possible.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the job output should be compressed, + false otherwise]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Tasks' Side-Effect Files + +

    Note: The following is valid only if the {@link OutputCommitter} + is {@link FileOutputCommitter}. If OutputCommitter is not + a FileOutputCommitter, the task's temporary output + directory is the same as {@link #getOutputPath(JobConf)}, i.e. + ${mapreduce.output.fileoutputformat.outputdir}

    + +

    Some applications need to create/write-to side-files, which differ from + the actual job-outputs. + +

    In such cases there could be issues with 2 instances of the same TIP + (running simultaneously e.g. speculative tasks) trying to open/write-to the + same file (path) on HDFS. Hence the application-writer will have to pick + unique names per task-attempt (e.g. using the attemptid, say + attempt_200709221812_0001_m_000000_0), not just per TIP.

    + +

    To get around this the Map-Reduce framework helps the application-writer + out by maintaining a special + ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} + sub-directory for each task-attempt on HDFS where the output of the + task-attempt goes. On successful completion of the task-attempt the files + in the ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} (only) + are promoted to ${mapreduce.output.fileoutputformat.outputdir}. Of course, the + framework discards the sub-directory of unsuccessful task-attempts. This + is completely transparent to the application.

    + +

    The application-writer can take advantage of this by creating any + side-files required in ${mapreduce.task.output.dir} during execution + of the reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the + framework will move them out similarly - thus they don't have to pick + unique paths per task-attempt.

    + +

    Note: the value of ${mapreduce.task.output.dir} during + execution of a particular task-attempt is actually + ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}, and this value is + set by the map-reduce framework. So, just create any side-files in the + path returned by {@link #getWorkOutputPath(JobConf)} from a map/reduce + task to take advantage of this feature.

    + +

    The entire discussion holds true for maps of jobs with + reducer=NONE (i.e. 0 reduces) since output of the map, in that case, + goes directly to HDFS.

    + + @return the {@link Path} to the task's temporary output directory + for the map-reduce job.]]> +
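    A minimal sketch of creating such a side-file from inside a task, assuming a {@link FileOutputCommitter}-based job as described above; the field name sideFile and the file name side-file.txt are illustrative:
    +     // sideFile is assumed to be an FSDataOutputStream field of the task class
    +     public void configure(JobConf job) {
    +       try {
    +         // ${mapreduce.task.output.dir} for this task-attempt
    +         Path workDir = FileOutputFormat.getWorkOutputPath(job);
    +         FileSystem fs = workDir.getFileSystem(job);
    +         // Promoted to the job output directory if the attempt succeeds
    +         sideFile = fs.create(new Path(workDir, "side-file.txt"));
    +       } catch (IOException ioe) {
    +         throw new RuntimeException(ioe);
    +       }
    +     }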
    +
    + + + + + + + + + + + + + The generated name can be used to create custom files from within the + different tasks for the job, the names for different tasks will not collide + with each other.

    + +

    The given name is postfixed with the task type, 'm' for maps, 'r' for + reduces, and the task partition number. For example, given the name 'test' + running on the first map of the job, the generated name will be + 'test-m-00000'.

    + + @param conf the configuration for the job. + @param name the name to make unique. + @return a unique name across all tasks of the job.]]> +
    +
    + + + + + The path can be used to create custom files from within the map and + reduce tasks. The path name will be unique for each task. The path parent + will be the job output directory.

    + +

    This method uses the {@link #getUniqueName} method to make the file name + unique for the task.

    + + @param conf the configuration for the job. + @param name the name for the file. + @return a unique path across all tasks of the job.]]> +
    +
    + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    or + conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength); +

    + @see FixedLengthRecordReader]]> +
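    For instance, a hedged configuration sketch; the record length of 100 bytes is illustrative only:
    +     JobConf job = new JobConf();
    +     job.setInputFormat(FixedLengthInputFormat.class);
    +     // Every input record is exactly 100 bytes long
    +     job.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, 100);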
    +
    + + + + + + + + + + + + + + + + + + + + + + + Each {@link InputSplit} is then assigned to an individual {@link Mapper} + for processing.

    + +

    Note: The split is a logical split of the inputs and the + input files are not physically split into chunks. For example, a split could + be an <input-file-path, start, offset> tuple. + + @param job job configuration. + @param numSplits the desired number of splits, a hint. + @return an array of {@link InputSplit}s for the job.]]> + + + + + + + + + It is the responsibility of the RecordReader to respect + record boundaries while processing the logical split to present a + record-oriented view to the individual task.

    + + @param split the {@link InputSplit} + @param job the job that this split belongs to + @return a {@link RecordReader}]]> +
    +
    + + InputFormat describes the input-specification for a + Map-Reduce job. + +

    The Map-Reduce framework relies on the InputFormat of the + job to:

    +

      +
    1. + Validate the input-specification of the job. +
    2. + Split-up the input file(s) into logical {@link InputSplit}s, each of + which is then assigned to an individual {@link Mapper}. +
    3. +
    4. + Provide the {@link RecordReader} implementation to be used to glean + input records from the logical InputSplit for processing by + the {@link Mapper}. +
    5. +
    + +

    The default behavior of file-based {@link InputFormat}s, typically + sub-classes of {@link FileInputFormat}, is to split the + input into logical {@link InputSplit}s based on the total size, in + bytes, of the input files. However, the {@link FileSystem} blocksize of + the input files is treated as an upper bound for input splits. A lower bound + on the split size can be set via + + mapreduce.input.fileinputformat.split.minsize.

    + +

    Clearly, logical splits based on input-size is insufficient for many + applications since record boundaries are to be respected. In such cases, the + application has to also implement a {@link RecordReader} on whom lies the + responsibilty to respect record-boundaries and present a record-oriented + view of the logical InputSplit to the individual task. + + @see InputSplit + @see RecordReader + @see JobClient + @see FileInputFormat]]> + + + + + + + + + + InputSplit. + + @return the number of bytes in the input split. + @throws IOException]]> + + + + + + InputSplit is + located as an array of Strings. + @throws IOException]]> + + + + InputSplit represents the data to be processed by an + individual {@link Mapper}. + +

    Typically, it presents a byte-oriented view on the input and is the + responsibility of {@link RecordReader} of the job to process this and present + a record-oriented view. + + @see InputFormat + @see RecordReader]]> + + + + + + + + + + SplitLocationInfos describing how the split + data is stored at each location. A null value indicates that all the + locations have the data stored on disk. + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + JobClient.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + jobid doesn't correspond to any known job. + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + JobClient is the primary interface for the user-job to interact + with the cluster. + + JobClient provides facilities to submit jobs, track their + progress, access component-tasks' reports/logs, get the Map-Reduce cluster + status information etc. + +

    The job submission process involves: +

      +
    1. + Checking the input and output specifications of the job. +
    2. +
    3. + Computing the {@link InputSplit}s for the job. +
    4. +
    5. + Setup the requisite accounting information for the {@link DistributedCache} + of the job, if necessary. +
    6. +
    7. + Copying the job's jar and configuration to the map-reduce system directory + on the distributed file-system. +
    8. +
    9. + Submitting the job to the cluster and optionally monitoring + its status. +
    10. +
    + + Normally the user creates the application, describes various facets of the + job via {@link JobConf} and then uses the JobClient to submit + the job and monitor its progress. + +

    Here is an example on how to use JobClient:

    +

    +     // Create a new JobConf
    +     JobConf job = new JobConf(new Configuration(), MyJob.class);
    +     
    +     // Specify various job-specific parameters     
    +     job.setJobName("myjob");
    +     
    +     job.setInputPath(new Path("in"));
    +     job.setOutputPath(new Path("out"));
    +     
    +     job.setMapperClass(MyJob.MyMapper.class);
    +     job.setReducerClass(MyJob.MyReducer.class);
    +
    +     // Submit the job, then poll for progress until the job is complete
    +     JobClient.runJob(job);
    + 
    + + Job Control + +

    At times clients would chain map-reduce jobs to accomplish complex tasks + which cannot be done via a single map-reduce job. This is fairly easy since + the output of the job, typically, goes to distributed file-system and that + can be used as the input for the next job.

    + +

    However, this also means that the onus on ensuring jobs are complete + (success/failure) lies squarely on the clients. In such situations the + various job-control options are: +

      +
    1. + {@link #runJob(JobConf)} : submits the job and returns only after + the job has completed. +
    2. +
    3. + {@link #submitJob(JobConf)} : only submits the job, then polls the + returned handle to the {@link RunningJob} to query status and make + scheduling decisions. +
    4. +
    5. + {@link JobConf#setJobEndNotificationURI(String)} : setup a notification + on job-completion, thus avoiding polling. +
    6. +
    + + @see JobConf + @see ClusterStatus + @see Tool + @see DistributedCache]]> +
    +
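    A hedged sketch of the second job-control option above (submit, then poll the returned {@link RunningJob}); MyJob and the five-second polling interval are illustrative:
    +     JobConf job = new JobConf(new Configuration(), MyJob.class);
    +     // ... set input/output paths, mapper, reducer as in the example above ...
    +
    +     JobClient client = new JobClient(job);
    +     RunningJob running = client.submitJob(job);
    +     while (!running.isComplete()) {
    +       System.out.printf("map %.0f%% reduce %.0f%%%n",
    +           running.mapProgress() * 100, running.reduceProgress() * 100);
    +       Thread.sleep(5000);
    +     }
    +     if (!running.isSuccessful()) {
    +       throw new IOException("Job " + running.getID() + " failed!");
    +     }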
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + If the parameter {@code loadDefaults} is false, the new instance + will not load resources from the default files. + + @param loadDefaults specifies whether to load from the default files]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if framework should keep the intermediate files + for failed tasks, false otherwise.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the outputs of the maps are to be compressed, + false otherwise.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This comparator should be provided if the equivalence rules for keys + for sorting the intermediates are different from those for grouping keys + before each call to + {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.

    + +

    For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed + in a single call to the reduce function if K1 and K2 compare as equal.

    + +

    Since {@link #setOutputKeyComparatorClass(Class)} can be used to control + how keys are sorted, this can be used in conjunction to simulate + secondary sort on values.

    + +

    Note: This is not a guarantee of the combiner sort being + stable in any sense. (In any case, with the order of available + map-outputs to the combiner being non-deterministic, it wouldn't make + that much sense.)

    + + @param theClass the comparator class to be used for grouping keys for the + combiner. It should implement RawComparator. + @see #setOutputKeyComparatorClass(Class)]]> +
    +
    + + + + This comparator should be provided if the equivalence rules for keys + for sorting the intermediates are different from those for grouping keys + before each call to + {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.

    + +

    For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed + in a single call to the reduce function if K1 and K2 compare as equal.

    + +

    Since {@link #setOutputKeyComparatorClass(Class)} can be used to control + how keys are sorted, this can be used in conjunction to simulate + secondary sort on values.

    + +

    Note: This is not a guarantee of the reduce sort being + stable in any sense. (In any case, with the order of available + map-outputs to the reduce being non-deterministic, it wouldn't make + that much sense.)

    + + @param theClass the comparator class to be used for grouping keys. + It should implement RawComparator. + @see #setOutputKeyComparatorClass(Class) + @see #setCombinerKeyGroupingComparator(Class)]]> +
    +
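    A short configuration sketch of the secondary-sort idea described above; FullKeyComparator and NaturalKeyGroupingComparator are hypothetical RawComparator implementations, not classes shipped with Hadoop:
    +     JobConf job = new JobConf(new Configuration(), MyJob.class);
    +     // Sort the intermediate records by the full (composite) key ...
    +     job.setOutputKeyComparatorClass(FullKeyComparator.class);
    +     // ... but group values for a single reduce() call by the natural key only
    +     job.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);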
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + combiner class used to combine map-outputs + before being sent to the reducers. Typically the combiner is same as the + the {@link Reducer} for the job i.e. {@link #getReducerClass()}. + + @return the user-defined combiner class used to combine map-outputs.]]> + + + + + + combiner class used to combine map-outputs + before being sent to the reducers. + +

    The combiner is an application-specified aggregation operation, which + can help cut down the amount of data transferred between the + {@link Mapper} and the {@link Reducer}, leading to better performance.

    + +

    The framework may invoke the combiner 0, 1, or multiple times, in both + the mapper and reducer tasks. In general, the combiner is called as the + sort/merge result is written to disk. The combiner must: +

      +
    • be side-effect free
    • +
    • have the same input and output key types and the same input and + output value types
    • +
    + +

    Typically the combiner is the same as the Reducer for the + job, i.e. {@link #setReducerClass(Class)}.

    + + @param theClass the user-defined combiner class used to combine + map-outputs.]]> +
    +
    + + + true. + + @return true if speculative execution be used for this job, + false otherwise.]]> + + + + + + true if speculative execution + should be turned on, else false.]]> + + + + + true. + + @return true if speculative execution be + used for this job for map tasks, + false otherwise.]]> + + + + + + true if speculative execution + should be turned on for map tasks, + else false.]]> + + + + + true. + + @return true if speculative execution be used + for reduce tasks for this job, + false otherwise.]]> + + + + + + true if speculative execution + should be turned on for reduce tasks, + else false.]]> + + + + + 1. + + @return the number of map tasks for this job.]]> + + + + + + Note: This is only a hint to the framework. The actual + number of spawned map tasks depends on the number of {@link InputSplit}s + generated by the job's {@link InputFormat#getSplits(JobConf, int)}. + + A custom {@link InputFormat} is typically used to accurately control + the number of map tasks for the job.

    + + How many maps? + +

    The number of maps is usually driven by the total size of the inputs + i.e. total number of blocks of the input files.

    + +

    The right level of parallelism for maps seems to be around 10-100 maps + per-node, although it has been set up to 300 or so for very cpu-light map + tasks. Task setup takes a while, so it is best if the maps take at least a + minute to execute.

    + +

    The default behavior of file-based {@link InputFormat}s is to split the + input into logical {@link InputSplit}s based on the total size, in + bytes, of input files. However, the {@link FileSystem} blocksize of the + input files is treated as an upper bound for input splits. A lower bound + on the split size can be set via + + mapreduce.input.fileinputformat.split.minsize.

    + +

    Thus, if you expect 10TB of input data and have a blocksize of 128MB, + you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is + used to set it even higher.

    + + @param n the number of map tasks for this job. + @see InputFormat#getSplits(JobConf, int) + @see FileInputFormat + @see FileSystem#getDefaultBlockSize() + @see FileStatus#getBlockSize()]]> +
    +
    + + + 1. + + @return the number of reduce tasks for this job.]]> + + + + + + How many reduces? + +

    The right number of reduces seems to be 0.95 or + 1.75 multiplied by ( + available memory for reduce tasks + (The value of this should be smaller than + numNodes * yarn.nodemanager.resource.memory-mb + since the resource of memory is shared by map tasks and other + applications) / + + mapreduce.reduce.memory.mb). +

    + +

    With 0.95 all of the reduces can launch immediately and + start transferring map outputs as the maps finish. With 1.75 + the faster nodes will finish their first round of reduces and launch a + second wave of reduces doing a much better job of load balancing.

    + +

    Increasing the number of reduces increases the framework overhead, but + increases load balancing and lowers the cost of failures.

    + +

    The scaling factors above are slightly less than whole numbers to + reserve a few reduce slots in the framework for speculative-tasks, failures + etc.

    + + Reducer NONE + +

    It is legal to set the number of reduce-tasks to zero.

    + +

    In this case the output of the map-tasks goes directly to the distributed + file-system, to the path set by + {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the + framework doesn't sort the map-outputs before writing them out to HDFS.

    + + @param n the number of reduce tasks for this job.]]> +
    +
    + + + mapreduce.map.maxattempts + property. If this property is not already set, the default is 4 attempts. + + @return the max number of attempts per map task.]]> + + + + + + + + + + + mapreduce.reduce.maxattempts + property. If this property is not already set, the default is 4 attempts. + + @return the max number of attempts per reduce task.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + noFailures, the + tasktracker is blacklisted for this job. + + @param noFailures maximum no. of failures of a given job per tasktracker.]]> + + + + + blacklisted for this job. + + @return the maximum no. of failures of a given job per tasktracker.]]> + + + + + failed. + + Defaults to zero, i.e. any failed map-task results in + the job being declared as {@link JobStatus#FAILED}. + + @return the maximum percentage of map tasks that can fail without + the job being aborted.]]> + + + + + + failed. + + @param percent the maximum percentage of map tasks that can fail without + the job being aborted.]]> + + + + + failed. + + Defaults to zero, i.e. any failed reduce-task results + in the job being declared as {@link JobStatus#FAILED}. + + @return the maximum percentage of reduce tasks that can fail without + the job being aborted.]]> + + + + + + failed. + + @param percent the maximum percentage of reduce tasks that can fail without + the job being aborted.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The debug script can aid debugging of failed map tasks. The script is + given task's stdout, stderr, syslog, jobconf files as arguments.

    + +

    The debug command, run on the node where the map failed, is:

    +

    + $script $stdout $stderr $syslog $jobconf.
    + 
    + +

    The script file is distributed through {@link DistributedCache} + APIs. The script needs to be symlinked.

    + +

    Here is an example on how to submit a script +

    + job.setMapDebugScript("./myscript");
    + DistributedCache.createSymlink(job);
    + DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
    + 
    + + @param mDbgScript the script name]]> +
    +
    + + + + + + + + + The debug script can aid debugging of failed reduce tasks. The script + is given task's stdout, stderr, syslog, jobconf files as arguments.

    + +

    The debug command, run on the node where the reduce failed, is:

    +

    + $script $stdout $stderr $syslog $jobconf.
    + 
    + +

    The script file is distributed through {@link DistributedCache} + APIs. The script file needs to be symlinked

    + +

    Here is an example on how to submit a script +

    + job.setReduceDebugScript("./myscript");
    + DistributedCache.createSymlink(job);
    + DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
    + 
    + + @param rDbgScript the script name]]> +
    +
    + + + + + + + + null if it hasn't + been set. + @see #setJobEndNotificationURI(String)]]> + + + + + + The uri can contain 2 special parameters: $jobId and + $jobStatus. Those, if present, are replaced by the job's + identifier and completion-status respectively.

    + +

    This is typically used by application-writers to implement chaining of + Map-Reduce jobs in an asynchronous manner.

    + + @param uri the job end notification uri + @see JobStatus]]> +
    +
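    A one-line sketch, with an illustrative URL, of setting such a notification:
    +     JobConf job = new JobConf();
    +     // $jobId and $jobStatus are expanded by the framework on completion
    +     job.setJobEndNotificationURI(
    +         "http://myhost:8080/notify?jobId=$jobId&status=$jobStatus");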
    + + + + + + + + + + + + + + + When a job starts, a shared directory is created at location + + ${mapreduce.cluster.local.dir}/taskTracker/$user/jobcache/$jobid/work/ . + This directory is exposed to the users through + mapreduce.job.local.dir . + So, the tasks can use this space + as scratch space and share files among them.

    + This value is available as System property also. + + @return The localized job specific shared directory]]> +
    +
    + + + + For backward compatibility, if the job configuration sets the + key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different + from {@link #DISABLED_MEMORY_LIMIT}, that value will be used + after converting it from bytes to MB. + @return memory required to run a map task of the job, in MB,]]> + + + + + + + + + For backward compatibility, if the job configuration sets the + key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different + from {@link #DISABLED_MEMORY_LIMIT}, that value will be used + after converting it from bytes to MB. + @return memory required to run a reduce task of the job, in MB.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This method is deprecated. Now, different memory limits can be + set for map and reduce tasks of a job, in MB. +

    + For backward compatibility, if the job configuration sets the + key {@link #MAPRED_TASK_MAXVMEM_PROPERTY}, that value is returned. + Otherwise, this method will return the larger of the values returned by + {@link #getMemoryForMapTask()} and {@link #getMemoryForReduceTask()} + after converting them into bytes. + + @return Memory required to run a task of this job, in bytes. + @see #setMaxVirtualMemoryForTask(long) + @deprecated Use {@link #getMemoryForMapTask()} and + {@link #getMemoryForReduceTask()}]]> + + + + + + + mapred.task.maxvmem is split into + mapreduce.map.memory.mb + and mapreduce.map.memory.mb,mapred + each of the new key are set + as mapred.task.maxvmem / 1024 + as new values are in MB + + @param vmem Maximum amount of virtual memory in bytes any task of this job + can use. + @see #getMaxVirtualMemoryForTask() + @deprecated + Use {@link #setMemoryForMapTask(long mem)} and + Use {@link #setMemoryForReduceTask(long mem)}]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + k1=v1,k2=v2. Further it can + reference existing environment variables via $key on + Linux or %key% on Windows. + + Example: +

      +
    • A=foo - This will set the env variable A to foo.
    • +
    + + @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or + {@link #MAPRED_REDUCE_TASK_ENV}]]> +
    + + + + k1=v1,k2=v2. Further it can + reference existing environment variables via $key on + Linux or %key% on Windows. + + Example: +
      +
    • A=foo - This will set the env variable A to foo.
    • +
    + + You can also add environment variables individually by appending + .VARNAME to this configuration key, where VARNAME is + the name of the environment variable. + + Example: +
      +
    • mapreduce.map.env.VARNAME=value
    • +
    ]]> +
    +
    + + + k1=v1,k2=v2. Further it can + reference existing environment variables via $key on + Linux or %key% on Windows. + + Example: +
      +
    • A=foo - This will set the env variable A to foo.
    • +
    + + You can also add environment variables individually by appending + .VARNAME to this configuration key, where VARNAME is + the name of the environment variable. + + Example: +
      +
    • mapreduce.reduce.env.VARNAME=value
    • +
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + JobConf is the primary interface for a user to describe a + map-reduce job to the Hadoop framework for execution. The framework tries to + faithfully execute the job as-is described by JobConf, however: +
      +
    1. + Some configuration parameters might have been marked as + + final by administrators and hence cannot be altered. +
    2. +
    3. + While some job parameters are straight-forward to set + (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly + with the rest of the framework and/or job-configuration and is relatively + more complex for the user to control finely + (e.g. {@link #setNumMapTasks(int)}). +
    4. +
    + +

    JobConf typically specifies the {@link Mapper}, combiner + (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and + {@link OutputFormat} implementations to be used etc. + +

    Optionally JobConf is used to specify other advanced facets + of the job such as Comparators to be used, files to be put in + the {@link DistributedCache}, whether or not intermediate and/or job outputs + are to be compressed (and how), debuggability via user-provided scripts + ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)}) + for doing post-processing on task logs, task's stdout, stderr, syslog, + etc.

    + +

    Here is an example on how to configure a job via JobConf:

    +

    +     // Create a new JobConf
    +     JobConf job = new JobConf(new Configuration(), MyJob.class);
    +     
    +     // Specify various job-specific parameters     
    +     job.setJobName("myjob");
    +     
    +     FileInputFormat.setInputPaths(job, new Path("in"));
    +     FileOutputFormat.setOutputPath(job, new Path("out"));
    +     
    +     job.setMapperClass(MyJob.MyMapper.class);
    +     job.setCombinerClass(MyJob.MyReducer.class);
    +     job.setReducerClass(MyJob.MyReducer.class);
    +     
    +     job.setInputFormat(SequenceFileInputFormat.class);
    +     job.setOutputFormat(SequenceFileOutputFormat.class);
    + 
    + + @see JobClient + @see ClusterStatus + @see Tool + @see DistributedCache]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + any job + run on the jobtracker started at 200707121733, we would use : +
     
    + JobID.getTaskIDsPattern("200707121733", null);
    + 
    + which will return : +
     "job_200707121733_[0-9]*" 
    + @param jtIdentifier jobTracker identifier, or null + @param jobId job number, or null + @return a regex pattern matching JobIDs]]> +
    +
    + + + An example JobID is : + job_200707121733_0003 , which represents the third job + running at the jobtracker started at 200707121733. +

    + Applications should never construct or parse JobID strings, but rather + use appropriate constructors or {@link #forName(String)} method. + + @see TaskID + @see TaskAttemptID]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Output pairs need not be of the same types as input pairs. A given + input pair may map to zero or many output pairs. Output pairs are + collected with calls to + {@link OutputCollector#collect(Object,Object)}.

    + +

    Applications can use the {@link Reporter} provided to report progress + or just indicate that they are alive. In scenarios where the application + takes significant amount of time to process individual key/value + pairs, this is crucial since the framework might assume that the task has + timed-out and kill that task. The other way of avoiding this is to set + + mapreduce.task.timeout to a high-enough value (or even zero for no + time-outs).

    + + @param key the input key. + @param value the input value. + @param output collects mapped keys and values. + @param reporter facility to report progress.]]> +
    + + + Maps are the individual tasks which transform input records into a + intermediate records. The transformed intermediate records need not be of + the same type as the input records. A given input pair may map to zero or + many output pairs.

    + +

    The Hadoop Map-Reduce framework spawns one map task for each + {@link InputSplit} generated by the {@link InputFormat} for the job. + Mapper implementations can access the {@link JobConf} for the + job via the {@link JobConfigurable#configure(JobConf)} method and initialize + themselves. Similarly, they can use the {@link Closeable#close()} method for + de-initialization.

    + +

    The framework then calls + {@link #map(Object, Object, OutputCollector, Reporter)} + for each key/value pair in the InputSplit for that task.

    + +

    All intermediate values associated with a given output key are + subsequently grouped by the framework, and passed to a {@link Reducer} to + determine the final output. Users can control the grouping by specifying + a Comparator via + {@link JobConf#setOutputKeyComparatorClass(Class)}.

    + +

    The grouped Mapper outputs are partitioned per + Reducer. Users can control which keys (and hence records) go to + which Reducer by implementing a custom {@link Partitioner}. + +

    Users can optionally specify a combiner, via + {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the + intermediate outputs, which helps to cut down the amount of data transferred + from the Mapper to the Reducer. + +

    The intermediate, grouped outputs are always stored in + {@link SequenceFile}s. Applications can specify if and how the intermediate + outputs are to be compressed and which {@link CompressionCodec}s are to be + used via the JobConf.

    + +

    If the job has + zero + reduces then the output of the Mapper is directly written + to the {@link FileSystem} without grouping by keys.

    + +

    Example:

    +

    +     public class MyMapper<K extends WritableComparable, V extends Writable> 
    +     extends MapReduceBase implements Mapper<K, V, K, V> {
    +     
    +       static enum MyCounters { NUM_RECORDS }
    +       
    +       private String mapTaskId;
    +       private String inputFile;
    +       private int noRecords = 0;
    +       
    +       public void configure(JobConf job) {
    +         mapTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
    +         inputFile = job.get(JobContext.MAP_INPUT_FILE);
    +       }
    +       
    +       public void map(K key, V val,
    +                       OutputCollector<K, V> output, Reporter reporter)
    +       throws IOException {
    +         // Process the <key, value> pair (assume this takes a while)
    +         // ...
    +         // ...
    +         
    +         // Let the framework know that we are alive, and kicking!
    +         // reporter.progress();
    +         
    +         // Process some more
    +         // ...
    +         // ...
    +         
    +         // Increment the no. of <key, value> pairs processed
    +         ++noRecords;
    +
    +         // Increment counters
    +         reporter.incrCounter(NUM_RECORDS, 1);
    +        
    +         // Every 100 records update application-level status
    +         if ((noRecords%100) == 0) {
    +           reporter.setStatus(mapTaskId + " processed " + noRecords + 
    +                              " from input-file: " + inputFile); 
    +         }
    +         
    +         // Output the result
    +         output.collect(key, val);
    +       }
    +     }
    + 
    + +

    Applications may write a custom {@link MapRunnable} to exert greater + control on map processing e.g. multi-threaded Mappers etc.

    + + @see JobConf + @see InputFormat + @see Partitioner + @see Reducer + @see MapReduceBase + @see MapRunnable + @see SequenceFile]]> +
    + + + + + + + + + + + + + + + + + + + + + + Provides default no-op implementations for a few methods, most non-trivial + applications need to override some of them.

    ]]> +
    +
    + + + + + + + + + + + <key, value> pairs. + +

    Mapping of input records to output records is complete when this method + returns.

    + + @param input the {@link RecordReader} to read the input records. + @param output the {@link OutputCollector} to collect the outputrecords. + @param reporter {@link Reporter} to report progress, status-updates etc. + @throws IOException]]> +
    +
    + + Custom implementations of MapRunnable can exert greater + control on map processing e.g. multi-threaded, asynchronous mappers etc.

    + + @see Mapper]]> +
    +
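    A hedged sketch of a custom MapRunnable; this single-threaded version mirrors what the default runner does and is the natural starting point for a multi-threaded variant (the class name MyMapRunner is illustrative):
    +     public class MyMapRunner<K1, V1, K2, V2>
    +         implements MapRunnable<K1, V1, K2, V2> {
    +
    +       private Mapper<K1, V1, K2, V2> mapper;
    +
    +       public void configure(JobConf job) {
    +         mapper = ReflectionUtils.newInstance(job.getMapperClass(), job);
    +       }
    +
    +       public void run(RecordReader<K1, V1> input,
    +                       OutputCollector<K2, V2> output,
    +                       Reporter reporter) throws IOException {
    +         K1 key = input.createKey();
    +         V1 value = input.createValue();
    +         // Drive the mapper over the split; a multi-threaded runner would
    +         // hand the pairs off to a worker pool here instead.
    +         while (input.next(key, value)) {
    +           mapper.map(key, value, output, reporter);
    +         }
    +         mapper.close();
    +       }
    +     }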
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nearly + equal content length.
    + Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)} + to construct RecordReader's for MultiFileSplit's. + @see MultiFileSplit]]> +
    +
    + + + + + + + + + + + + + MultiFileSplit can be used to implement {@link RecordReader}'s, with + reading one record per file. + @see FileSplit + @see MultiFileInputFormat]]> + + + + + + + + + + + + + + + <key, value> pairs output by {@link Mapper}s + and {@link Reducer}s. + +

    OutputCollector is the generalization of the facility + provided by the Map-Reduce framework to collect data output by either the + Mapper or the Reducer i.e. intermediate outputs + or the output of the job.

    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if task output recovery is supported, + false otherwise + @throws IOException + @see #recoverTask(TaskAttemptContext)]]> + + + + + + + true repeatable job commit is supported, + false otherwise + @throws IOException]]> + + + + + + + + + + + OutputCommitter. This is called from the application master + process, but it is called individually for each task. + + If an exception is thrown the task will be attempted again. + + @param taskContext Context of the task whose output is being recovered + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OutputCommitter describes the commit of task output for a + Map-Reduce job. + +

    The Map-Reduce framework relies on the OutputCommitter of + the job to:

    +

      +
    1. + Setup the job during initialization. For example, create the temporary + output directory for the job during the initialization of the job. +
    2. +
    3. + Cleanup the job after the job completion. For example, remove the + temporary output directory after the job completion. +
    4. +
    5. + Setup the task temporary output. +
    6. +
    7. + Check whether a task needs a commit. This is to avoid the commit + procedure if a task does not need commit. +
    8. +
    9. + Commit of the task output. +
    10. +
    11. + Discard the task commit. +
    12. +
    + The methods in this class can be called from several different processes and + from several different contexts. It is important to know which process and + which context each is called from. Each method should be marked accordingly + in its documentation. It is also important to note that not all methods are + guaranteed to be called once and only once. If a method is not guaranteed to + have this property the output committer needs to handle this appropriately. + Also note it will only be in rare situations where they may be called + multiple times for the same task. + + @see FileOutputCommitter + @see JobContext + @see TaskAttemptContext]]> +
    +
    + + + + + + + + + + + + + + + + + + + This is to validate the output specification for the job when it is + a job is submitted. Typically checks that it does not already exist, + throwing an exception when it already exists, so that output is not + overwritten.

    + + @param ignored + @param job job configuration. + @throws IOException when output should not be attempted]]> +
    +
    + + OutputFormat describes the output-specification for a + Map-Reduce job. + +

    The Map-Reduce framework relies on the OutputFormat of the + job to:

    +

      +
    1. + Validate the output-specification of the job. For e.g. check that the + output directory doesn't already exist. +
    2. + Provide the {@link RecordWriter} implementation to be used to write out + the output files of the job. Output files are stored in a + {@link FileSystem}. +
    3. +
    + + @see RecordWriter + @see JobConf]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + Typically a hash function on a all or a subset of the key.

    + + @param key the key to be paritioned. + @param value the entry value. + @param numPartitions the total number of partitions. + @return the partition number for the key.]]> +
    +
    + + Partitioner controls the partitioning of the keys of the + intermediate map-outputs. The key (or a subset of the key) is used to derive + the partition, typically by a hash function. The total number of partitions + is the same as the number of reduce tasks for the job. Hence this controls + which of the m reduce tasks the intermediate key (and hence the + record) is sent for reduction.

    + +

    Note: A Partitioner is created only when there are multiple + reducers.

    + + @see Reducer]]> +
    +
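    A small sketch of a custom Partitioner; the class name and the tab-separated key layout are assumptions made only for illustration:
    +     public class FirstFieldPartitioner implements Partitioner<Text, Text> {
    +
    +       public void configure(JobConf job) {
    +       }
    +
    +       public int getPartition(Text key, Text value, int numPartitions) {
    +         // Hash only the part of the key before the first tab so that all
    +         // records sharing that prefix end up at the same reducer.
    +         String firstField = key.toString().split("\t", 2)[0];
    +         return (firstField.hashCode() & Integer.MAX_VALUE) % numPartitions;
    +       }
    +     }
    +
    +     // Wire it into the job:
    +     // job.setPartitionerClass(FirstFieldPartitioner.class);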
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.0 to 1.0. + @throws IOException]]> + + + + RecordReader reads <key, value> pairs from an + {@link InputSplit}. + +

    RecordReader, typically, converts the byte-oriented view of + the input, provided by the InputSplit, and presents a + record-oriented view for the {@link Mapper} and {@link Reducer} tasks for + processing. It thus assumes the responsibility of processing record + boundaries and presenting the tasks with keys and values.

    + + @see InputSplit + @see InputFormat]]> +
    +
    + + + + + + + + + + + + + + + + RecordWriter to future operations. + + @param reporter facility to report progress. + @throws IOException]]> + + + + RecordWriter writes the output <key, value> pairs + to an output file. + +

    RecordWriter implementations write the job outputs to the + {@link FileSystem}. + + @see OutputFormat]]> + + + + + + + + + + + + + + + Reduces values for a given key. + +

    The framework calls this method for each + <key, (list of values)> pair in the grouped inputs. + Output values must be of the same type as input values. Input keys must + not be altered. The framework will reuse the key and value objects + that are passed into the reduce, therefore the application should clone + the objects they want to keep a copy of. In many cases, all values are + combined into zero or one value. +

    + +

    Output pairs are collected with calls to + {@link OutputCollector#collect(Object,Object)}.

    + +

    Applications can use the {@link Reporter} provided to report progress + or just indicate that they are alive. In scenarios where the application + takes a significant amount of time to process individual key/value + pairs, this is crucial since the framework might assume that the task has + timed-out and kill that task. The other way of avoiding this is to set + + mapreduce.task.timeout to a high-enough value (or even zero for no + time-outs).

    + + @param key the key. + @param values the list of values to reduce. + @param output to collect keys and combined values. + @param reporter facility to report progress.]]> +
    + + + The number of Reducers for the job is set by the user via + {@link JobConf#setNumReduceTasks(int)}. Reducer implementations + can access the {@link JobConf} for the job via the + {@link JobConfigurable#configure(JobConf)} method and initialize themselves. + Similarly they can use the {@link Closeable#close()} method for + de-initialization.

    + +

    Reducer has 3 primary phases:

    +
      +
    1. + + Shuffle + +

      Reducer is input the grouped output of a {@link Mapper}. + In this phase the framework, for each Reducer, fetches the + relevant partition of the output of all the Mappers, via HTTP. +

      +
    2. + +
    3. + Sort + +

      The framework groups Reducer inputs by keys + (since different Mappers may have output the same key) in this + stage.

      + +

      The shuffle and sort phases occur simultaneously i.e. while outputs are + being fetched they are merged.

      + + SecondarySort + +

      If equivalence rules for keys while grouping the intermediates are + different from those for grouping keys before reduction, then one may + specify a Comparator via + {@link JobConf#setOutputValueGroupingComparator(Class)}. Since + {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to + control how intermediate keys are grouped, these can be used in conjunction + to simulate secondary sort on values.

      + + + For example, say that you want to find duplicate web pages and tag them + all with the url of the "best" known example. You would set up the job + like: +
        +
      • Map Input Key: url
      • +
      • Map Input Value: document
      • +
      • Map Output Key: document checksum, url pagerank
      • +
      • Map Output Value: url
      • +
      • Partitioner: by checksum
      • +
      • OutputKeyComparator: by checksum and then decreasing pagerank
      • +
      • OutputValueGroupingComparator: by checksum
      • +
      +
    4. + +
    5. + Reduce + +

      In this phase the + {@link #reduce(Object, Iterator, OutputCollector, Reporter)} + method is called for each <key, (list of values)> pair in + the grouped inputs.

      +

      The output of the reduce task is typically written to the + {@link FileSystem} via + {@link OutputCollector#collect(Object, Object)}.

      +
    6. +
    + +

    The output of the Reducer is not re-sorted.

    + +

    Example:

    +

    +     public class MyReducer<K extends WritableComparable, V extends Writable> 
    +     extends MapReduceBase implements Reducer<K, V, K, V> {
    +     
    +       static enum MyCounters { NUM_RECORDS }
    +        
    +       private String reduceTaskId;
    +       private int noKeys = 0;
    +       
    +       public void configure(JobConf job) {
    +         reduceTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
    +       }
    +       
    +       public void reduce(K key, Iterator<V> values,
    +                          OutputCollector<K, V> output, 
    +                          Reporter reporter)
    +       throws IOException {
    +       
    +         // Process
    +         int noValues = 0;
    +         while (values.hasNext()) {
    +           V value = values.next();
    +           
    +           // Increment the no. of values for this key
    +           ++noValues;
    +           
    +           // Process the <key, value> pair (assume this takes a while)
    +           // ...
    +           // ...
    +           
    +           // Let the framework know that we are alive, and kicking!
    +           if ((noValues%10) == 0) {
    +             reporter.progress();
    +           }
    +         
    +           // Process some more
    +           // ...
    +           // ...
    +           
    +           // Output the <key, value> 
    +           output.collect(key, value);
    +         }
    +         
    +         // Increment the no. of <key, list of values> pairs processed
    +         ++noKeys;
    +         
    +         // Increment counters
    +         reporter.incrCounter(NUM_RECORDS, 1);
    +         
    +         // Every 100 keys update application-level status
    +         if ((noKeys%100) == 0) {
    +           reporter.setStatus(reduceTaskId + " processed " + noKeys);
    +         }
    +       }
    +     }
    + 
    + + @see Mapper + @see Partitioner + @see Reporter + @see MapReduceBase]]> +
    +
    + + + + + + + + + + + + + + Counter of the given group/name.]]> + + + + + + + Counter of the given group/name.]]> + + + + + + + Enum. + @param amount A non-negative amount by which the counter is to + be incremented.]]> + + + + + + + + + + + + + + InputSplit that the map is reading from. + @throws UnsupportedOperationException if called outside a mapper]]> + + + + + + + + + + + + + + {@link Mapper} and {@link Reducer} can use the Reporter + provided to report progress or just indicate that they are alive. In + scenarios where the application takes significant amount of time to + process individual key/value pairs, this is crucial since the framework + might assume that the task has timed-out and kill that task. + +

    Applications can also update {@link Counters} via the provided + Reporter .

    + + @see Progressable + @see Counters]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + progress of the job's map-tasks, as a float between 0.0 + and 1.0. When all map tasks have completed, the function returns 1.0. + + @return the progress of the job's map-tasks. + @throws IOException]]> + + + + + + progress of the job's reduce-tasks, as a float between 0.0 + and 1.0. When all reduce tasks have completed, the function returns 1.0. + + @return the progress of the job's reduce-tasks. + @throws IOException]]> + + + + + + progress of the job's cleanup-tasks, as a float between 0.0 + and 1.0. When all cleanup tasks have completed, the function returns 1.0. + + @return the progress of the job's cleanup-tasks. + @throws IOException]]> + + + + + + progress of the job's setup-tasks, as a float between 0.0 + and 1.0. When all setup tasks have completed, the function returns 1.0. + + @return the progress of the job's setup-tasks. + @throws IOException]]> + + + + + + true if the job is complete, else false. + @throws IOException]]> + + + + + + true if the job succeeded, else false. + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the job retired, else false. + @throws IOException]]> + + + + + + + + + + RunningJob is the user-interface to query for details on a + running Map-Reduce job. + +

    Clients can get hold of RunningJob via the {@link JobClient} + and then query the running-job for details such as name, configuration, + progress etc.

    + + @see JobClient]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + This allows the user to specify the key class to be different + from the actual class ({@link BytesWritable}) used for writing

    + + @param conf the {@link JobConf} to modify + @param theClass the SequenceFile output key class.]]> +
    +
    + + + + + This allows the user to specify the value class to be different + from the actual class ({@link BytesWritable}) used for writing

    + + @param conf the {@link JobConf} to modify + @param theClass the SequenceFile output key class.]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if auto increment + {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}. + false otherwise.]]> + + + + + + + + + + + + + true if auto increment + {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}. + false otherwise.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Hadoop provides an optional mode of execution in which the bad records + are detected and skipped in further attempts. + +

    This feature can be used when map/reduce tasks crash deterministically on + certain input. This happens due to bugs in the map/reduce function. The usual + course would be to fix these bugs. But sometimes this is not possible; + perhaps the bug is in third party libraries for which the source code is + not available. Due to this, the task never reaches completion even with + multiple attempts, and the complete data for that task is lost.

    + +

    With this feature, only a small portion of data surrounding + the bad record is lost, which may be acceptable for some user applications; + see {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}

    + +

    The skipping mode gets kicked off after a certain number of failures; + see {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}

    + +

    In the skipping mode, the map/reduce task maintains the record range which + is getting processed at all times. Before giving the input to the + map/reduce function, it sends this record range to the Task tracker. + If the task crashes, the Task tracker knows which range was the last one reported. + On further attempts that range gets skipped.

    ]]> +
    +
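    A hedged configuration sketch for the skipping mode described above; the thresholds and the skip-output path are illustrative:
    +     JobConf job = new JobConf();
    +     // Start skipping after two failed attempts of the same task
    +     SkipBadRecords.setAttemptsToStartSkipping(job, 2);
    +     // Accept losing at most one record around each bad map record
    +     SkipBadRecords.setMapperMaxSkipRecords(job, 1);
    +     // Keep the skipped records on HDFS for later inspection
    +     SkipBadRecords.setSkipOutputPath(job, new Path("/myapp/skip"));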
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + all task attempt IDs + of any jobtracker, in any job, of the first + map task, we would use : +
     
    + TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
    + 
    + which will return : +
     "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" 
    + @param jtIdentifier jobTracker identifier, or null + @param jobId job number, or null + @param isMap whether the tip is a map, or null + @param taskId taskId number, or null + @param attemptId the task attempt number, or null + @return a regex pattern matching TaskAttemptIDs]]> +
    +
    + + + + + + + + all task attempt IDs + of any jobtracker, in any job, of the first + map task, we would use : +
     
    + TaskAttemptID.getTaskAttemptIDsPattern(null, null, TaskType.MAP, 1, null);
    + 
    + which will return : +
     "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" 
    + @param jtIdentifier jobTracker identifier, or null + @param jobId job number, or null + @param type the {@link TaskType} + @param taskId taskId number, or null + @param attemptId the task attempt number, or null + @return a regex pattern matching TaskAttemptIDs]]> +
    +
    + + + An example TaskAttemptID is : + attempt_200707121733_0003_m_000005_0 , which represents the + zeroth task attempt for the fifth map task in the third job + running at the jobtracker started at 200707121733. +

    + Applications should never construct or parse TaskAttemptID strings + , but rather use appropriate constructors or {@link #forName(String)} + method. + + @see JobID + @see TaskID]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the first map task + of any jobtracker, of any job, we would use : +

     
    + TaskID.getTaskIDsPattern(null, null, true, 1);
    + 
    + which will return : +
     "task_[^_]*_[0-9]*_m_000001*" 
    + @param jtIdentifier jobTracker identifier, or null + @param jobId job number, or null + @param isMap whether the tip is a map, or null + @param taskId taskId number, or null + @return a regex pattern matching TaskIDs + @deprecated Use {@link TaskID#getTaskIDsPattern(String, Integer, TaskType, + Integer)}]]> +
    + + + + + + + + the first map task + of any jobtracker, of any job, we would use : +
     
    + TaskID.getTaskIDsPattern(null, null, true, 1);
    + 
    + which will return : +
     "task_[^_]*_[0-9]*_m_000001*" 
    + @param jtIdentifier jobTracker identifier, or null + @param jobId job number, or null + @param type the {@link TaskType}, or null + @param taskId taskId number, or null + @return a regex pattern matching TaskIDs]]> +
    +
    + + + + + + + An example TaskID is : + task_200707121733_0003_m_000005 , which represents the + fifth map task in the third job running at the jobtracker + started at 200707121733. +

    + Applications should never construct or parse TaskID strings + , but rather use appropriate constructors or {@link #forName(String)} + method. + + @see JobID + @see TaskAttemptID]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the Job was added.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ([,]*) + func ::= tbl(,"") + class ::= @see java.lang.Class#forName(java.lang.String) + path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String) + } + Reads expression from the mapred.join.expr property and + user-supplied join types from mapred.join.define.<ident> + types. Paths supplied to tbl are given as input paths to the + InputFormat class listed. + @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ,

    ) }]]> + + + + + + + + (tbl(,),tbl(,),...,tbl(,)) }]]> + + + + + + + + (tbl(,),tbl(,),...,tbl(,)) }]]> + + + + mapred.join.define.<ident> to a classname. In the expression + mapred.join.expr, the identifier will be assumed to be a + ComposableRecordReader. + mapred.join.keycomparator can be a classname used to compare keys + in the join. + @see #setFormat + @see JoinRecordReader + @see MultiFilterRecordReader]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ...... + }]]> + + + + + + + + + + + + + + + + + + + + + capacity children to position + id in the parent reader. + The id of a root CompositeRecordReader is -1 by convention, but relying + on this is not recommended.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + override(S1,S2,S3) will prefer values + from S3 over S2, and values from S2 over S1 for all keys + emitted from all sources.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + It has to be specified how key and values are passed from one element of + the chain to the next, by value or by reference. If a Mapper leverages the + assumed semantics that the key and values are not modified by the collector + 'by value' must be used. If the Mapper does not expect this semantics, as + an optimization to avoid serialization and deserialization 'by reference' + can be used. +

    + For the added Mapper, the configuration given for it, mapperConf, has precedence over the job's JobConf. This precedence is in effect while the task is running.

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainMapper, this is done by the addMapper for the last mapper in the chain +

    + + @param job job's JobConf to add the Mapper class. + @param klass the Mapper class to add. + @param inputKeyClass mapper input key class. + @param inputValueClass mapper input value class. + @param outputKeyClass mapper output key class. + @param outputValueClass mapper output value class. + @param byValue indicates if key/values should be passed by value + to the next Mapper in the chain, if any. + @param mapperConf a JobConf with the configuration for the Mapper + class. It is recommended to use a JobConf without default values using the + JobConf(boolean loadDefaults) constructor with FALSE.]]> + + + + + + + If this method is overriden super.configure(...) should be + invoked at the beginning of the overwriter method.]]> + + + + + + + + + + map(...) methods of the Mappers in the chain.]]> + + + + + + + If this method is overriden super.close() should be + invoked at the end of the overwriter method.]]> + + + + + The Mapper classes are invoked in a chained (or piped) fashion, the output of + the first becomes the input of the second, and so on until the last Mapper, + the output of the last Mapper will be written to the task's output. +

    + The key functionality of this feature is that the Mappers in the chain do not + need to be aware that they are executed in a chain. This enables having + reusable specialized Mappers that can be combined to perform composite + operations within a single task. +

    + Special care has to be taken when creating chains so that the key/values output by a Mapper are valid for the following Mapper in the chain. It is assumed all Mappers and the Reduce in the chain use matching output and input key and value classes, as no conversion is done by the chaining code.

    + Using the ChainMapper and the ChainReducer classes it is possible to compose Map/Reduce jobs that look like [MAP+ / REDUCE MAP*]. An immediate benefit of this pattern is a dramatic reduction in disk IO.

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainMapper, this is done by the addMapper for the last mapper in the chain. +

    + ChainMapper usage pattern: +

    +

    + ...
    + conf.setJobName("chain");
    + conf.setInputFormat(TextInputFormat.class);
    + conf.setOutputFormat(TextOutputFormat.class);
    +
    + JobConf mapAConf = new JobConf(false);
    + ...
    + ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
    +   Text.class, Text.class, true, mapAConf);
    +
    + JobConf mapBConf = new JobConf(false);
    + ...
    + ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
    +   LongWritable.class, Text.class, false, mapBConf);
    +
    + JobConf reduceConf = new JobConf(false);
    + ...
    + ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
    +   Text.class, Text.class, true, reduceConf);
    +
    + ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
    +   LongWritable.class, Text.class, false, null);
    +
    + ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
    +   LongWritable.class, LongWritable.class, true, null);
    +
    + FileInputFormat.setInputPaths(conf, inDir);
    + FileOutputFormat.setOutputPath(conf, outDir);
    + ...
    +
    + JobClient jc = new JobClient(conf);
    + RunningJob job = jc.submitJob(conf);
    + ...
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + It has to be specified how key and values are passed from one element of + the chain to the next, by value or by reference. If a Reducer leverages the + assumed semantics that the key and values are not modified by the collector + 'by value' must be used. If the Reducer does not expect this semantics, as + an optimization to avoid serialization and deserialization 'by reference' + can be used. +

    + For the added Reducer, the configuration given for it, reducerConf, has precedence over the job's JobConf. This precedence is in effect while the task is running.

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainReducer, this is done by the setReducer or the addMapper for the last + element in the chain. + + @param job job's JobConf to add the Reducer class. + @param klass the Reducer class to add. + @param inputKeyClass reducer input key class. + @param inputValueClass reducer input value class. + @param outputKeyClass reducer output key class. + @param outputValueClass reducer output value class. + @param byValue indicates if key/values should be passed by value + to the next Mapper in the chain, if any. + @param reducerConf a JobConf with the configuration for the Reducer + class. It is recommended to use a JobConf without default values using the + JobConf(boolean loadDefaults) constructor with FALSE.]]> + + + + + + + + + + + + + + It has to be specified how key and values are passed from one element of + the chain to the next, by value or by reference. If a Mapper leverages the + assumed semantics that the key and values are not modified by the collector + 'by value' must be used. If the Mapper does not expect this semantics, as + an optimization to avoid serialization and deserialization 'by reference' + can be used. +

    + For the added Mapper, the configuration given for it, mapperConf, has precedence over the job's JobConf. This precedence is in effect while the task is running.

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainMapper, this is done by the addMapper for the last mapper in the chain + . + + @param job chain job's JobConf to add the Mapper class. + @param klass the Mapper class to add. + @param inputKeyClass mapper input key class. + @param inputValueClass mapper input value class. + @param outputKeyClass mapper output key class. + @param outputValueClass mapper output value class. + @param byValue indicates if key/values should be passed by value + to the next Mapper in the chain, if any. + @param mapperConf a JobConf with the configuration for the Mapper + class. It is recommended to use a JobConf without default values using the + JobConf(boolean loadDefaults) constructor with FALSE.]]> + + + + + + + If this method is overriden super.configure(...) should be + invoked at the beginning of the overwriter method.]]> + + + + + + + + + + reduce(...) method of the Reducer with the + map(...) methods of the Mappers in the chain.]]> + + + + + + + If this method is overriden super.close() should be + invoked at the end of the overwriter method.]]> + + + + + For each record output by the Reducer, the Mapper classes are invoked in a + chained (or piped) fashion, the output of the first becomes the input of the + second, and so on until the last Mapper, the output of the last Mapper will + be written to the task's output. +

    + The key functionality of this feature is that the Mappers in the chain do not + need to be aware that they are executed after the Reducer or in a chain. + This enables having reusable specialized Mappers that can be combined to + perform composite operations within a single task. +

    + Special care has to be taken when creating chains so that the key/values output by a Mapper are valid for the following Mapper in the chain. It is assumed all Mappers and the Reduce in the chain use matching output and input key and value classes, as no conversion is done by the chaining code.

    + Using the ChainMapper and the ChainReducer classes it is possible to compose Map/Reduce jobs that look like [MAP+ / REDUCE MAP*]. An immediate benefit of this pattern is a dramatic reduction in disk IO.

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainReducer, this is done by the setReducer or the addMapper for the last + element in the chain. +

    + ChainReducer usage pattern: +

    +

    + ...
    + conf.setJobName("chain");
    + conf.setInputFormat(TextInputFormat.class);
    + conf.setOutputFormat(TextOutputFormat.class);
    +
    + JobConf mapAConf = new JobConf(false);
    + ...
    + ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
    +   Text.class, Text.class, true, mapAConf);
    +
    + JobConf mapBConf = new JobConf(false);
    + ...
    + ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
    +   LongWritable.class, Text.class, false, mapBConf);
    +
    + JobConf reduceConf = new JobConf(false);
    + ...
    + ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
    +   Text.class, Text.class, true, reduceConf);
    +
    + ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
    +   LongWritable.class, Text.class, false, null);
    +
    + ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
    +   LongWritable.class, LongWritable.class, true, null);
    +
    + FileInputFormat.setInputPaths(conf, inDir);
    + FileOutputFormat.setOutputPath(conf, outDir);
    + ...
    +
    + JobClient jc = new JobClient(conf);
    + RunningJob job = jc.submitJob(conf);
    + ...
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + RecordReader's for CombineFileSplit's. + @see CombineFileSplit]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CombineFileRecordReader. + + Subclassing is needed to get a concrete record reader wrapper because of the + constructor requirement. + + @see CombineFileRecordReader + @see CombineFileInputFormat]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CombineFileInputFormat-equivalent for + SequenceFileInputFormat. + + @see CombineFileInputFormat]]> + + + + + + + + + + + + + + + CombineFileInputFormat-equivalent for + TextInputFormat. + + @see CombineFileInputFormat]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the name output is multi, false + if it is single. If the name output is not defined it returns + false]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + By default these counters are disabled. +

    + MultipleOutputs supports counters; by default they are disabled. The counters group is the {@link MultipleOutputs} class name.

    + The names of the counters are the same as the named outputs. For multi named outputs the name of the counter is the concatenation of the named output, an underscore ('_') and the multiname. + + @param conf job conf in which to enable the counters. + @param enabled indicates if the counters will be enabled or not.]]> +
    +
    + + + + + By default these counters are disabled. +

    + MultipleOutputs supports counters; by default they are disabled. The counters group is the {@link MultipleOutputs} class name.

    + The names of the counters are the same as the named outputs. For multi named outputs the name of the counter is the concatenation of the named output, an underscore ('_') and the multiname. + + @param conf job conf to check. + @return TRUE if the counters are enabled, FALSE if they are disabled.]]> +
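    A minimal sketch of toggling and checking these counters, assuming an existing JobConf named conf:

       // Enable the per-named-output counters for this job.
       MultipleOutputs.setCountersEnabled(conf, true);
       boolean countersOn = MultipleOutputs.getCountersEnabled(conf);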
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + If overriden subclasses must invoke super.close() at the + end of their close() + + @throws java.io.IOException thrown if any of the MultipleOutput files + could not be closed properly.]]> + + + + OutputCollector passed to + the map() and reduce() methods of the + Mapper and Reducer implementations. +

    + Each additional output, or named output, may be configured with its own + OutputFormat, with its own key class and with its own value + class. +

    + A named output can be a single file or a multi file. The latter is referred to as a multi named output.

    + A multi named output is an unbound set of files all sharing the same + OutputFormat, key class and value class configuration. +

    + When named outputs are used within a Mapper implementation, key/values written to a named output are not part of the reduce phase; only key/values written to the job OutputCollector are part of the reduce phase.

    + MultipleOutputs supports counters; by default they are disabled. The counters group is the {@link MultipleOutputs} class name.

    + The names of the counters are the same as the named outputs. For multi named outputs the name of the counter is the concatenation of the named output, an underscore ('_') and the multiname.

    + Job configuration usage pattern is: +

    +
    + JobConf conf = new JobConf();
    +
    + conf.setInputPath(inDir);
    + FileOutputFormat.setOutputPath(conf, outDir);
    +
    + conf.setMapperClass(MOMap.class);
    + conf.setReducerClass(MOReduce.class);
    + ...
    +
    + // Defines additional single text based output 'text' for the job
    + MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
    + LongWritable.class, Text.class);
    +
    + // Defines additional multi sequencefile based output 'sequence' for the
    + // job
    + MultipleOutputs.addMultiNamedOutput(conf, "seq",
    +   SequenceFileOutputFormat.class,
    +   LongWritable.class, Text.class);
    + ...
    +
    + JobClient jc = new JobClient();
    + RunningJob job = jc.submitJob(conf);
    +
    + ...
    + 
    +

    + Job configuration usage pattern is: +

    +
    + public class MOReduce implements
    +   Reducer<WritableComparable, Writable> {
    + private MultipleOutputs mos;
    +
    + public void configure(JobConf conf) {
    + ...
    + mos = new MultipleOutputs(conf);
    + }
    +
    + public void reduce(WritableComparable key, Iterator<Writable> values,
    + OutputCollector output, Reporter reporter)
    + throws IOException {
    + ...
    + mos.getCollector("text", reporter).collect(key, new Text("Hello"));
    + mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
    + mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
    + ...
    + }
    +
    + public void close() throws IOException {
    + mos.close();
    + ...
    + }
    +
    + }
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + It can be used instead of the default implementation, + of {@link org.apache.hadoop.mapred.MapRunner}, when the Map + operation is not CPU bound in order to improve throughput. +

    + Map implementations using this MapRunnable must be thread-safe. +

    + The Map-Reduce job has to be configured to use this MapRunnable class (using the JobConf.setMapRunnerClass method), and the number of threads the thread-pool can use is set with the mapred.map.multithreadedrunner.threads property; its default value is 10 threads.
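    A minimal configuration sketch; the thread count of 20 and the MyJob class are arbitrary:

       JobConf conf = new JobConf(MyJob.class);
       conf.setMapRunnerClass(MultithreadedMapRunner.class);
       // Size of the thread pool that runs the (thread-safe) map function.
       conf.setInt("mapred.map.multithreadedrunner.threads", 20);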

    ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + R reduces, there are R-1 + keys in the SequenceFile. + @deprecated Use + {@link #setPartitionFile(Configuration, Path)} + instead]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Cluster. + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ClusterMetrics provides clients with information such as: +

      +
    1. Size of the cluster.
    2. Number of blacklisted and decommissioned trackers.
    3. Slot capacity of the cluster.
    4. The number of currently occupied/reserved map and reduce slots.
    5. The number of currently running map and reduce tasks.
    6. The number of job submissions.
    + +

    Clients can query for the latest ClusterMetrics via {@link Cluster#getClusterStatus()}.
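    For example, a short sketch of reading a few of these values, assuming a Configuration named conf:

       Cluster cluster = new Cluster(conf);
       ClusterMetrics metrics = cluster.getClusterStatus();
       int trackers    = metrics.getTaskTrackerCount();
       int mapSlots    = metrics.getMapSlotCapacity();
       int runningMaps = metrics.getRunningMaps();
       int submissions = metrics.getTotalJobSubmissions();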

    + + @see Cluster]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Counters represent global counters, defined either by the + Map-Reduce framework or applications. Each Counter is named by + an {@link Enum} and has a long for the value.

    + +

    Counters are bunched into Groups, each comprising of + counters from a particular Enum class.]]> + + + + + + + + + + + + + + + + + + + + + the type of counter + @param the type of counter group + @param counters the old counters object]]> + + + + Counters holds per job/task counters, defined either by the + Map-Reduce framework or applications. Each Counter can be of + any {@link Enum} type.

    + +

    Counters are bunched into {@link CounterGroup}s, each + comprising of counters from a particular Enum class.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Each {@link InputSplit} is then assigned to an individual {@link Mapper} + for processing.
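    For example, a sketch of an application-defined counter; the enum name is made up:

       enum RecordQuality { WELL_FORMED, MALFORMED }

       // inside a Mapper or Reducer:
       //   context.getCounter(RecordQuality.MALFORMED).increment(1);
       // after the job finishes:
       //   long bad = job.getCounters().findCounter(RecordQuality.MALFORMED).getValue();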

    + +

    Note: The split is a logical split of the inputs and the + input files are not physically split into chunks. For e.g. a split could + be <input-file-path, start, offset> tuple. The InputFormat + also creates the {@link RecordReader} to read the {@link InputSplit}. + + @param context job configuration. + @return an array of {@link InputSplit}s for the job.]]> + + + + + + + + + + + + + InputFormat describes the input-specification for a + Map-Reduce job. + +

    The Map-Reduce framework relies on the InputFormat of the + job to:

    +

      +
    1. Validate the input-specification of the job.
    2. Split up the input file(s) into logical {@link InputSplit}s, each of
       which is then assigned to an individual {@link Mapper}.
    3. Provide the {@link RecordReader} implementation to be used to glean
       input records from the logical InputSplit for processing by
       the {@link Mapper}.
    + +

    The default behavior of file-based {@link InputFormat}s, typically + sub-classes of {@link FileInputFormat}, is to split the + input into logical {@link InputSplit}s based on the total size, in + bytes, of the input files. However, the {@link FileSystem} blocksize of + the input files is treated as an upper bound for input splits. A lower bound + on the split size can be set via + + mapreduce.input.fileinputformat.split.minsize.

    + +

    Clearly, logical splits based on input-size is insufficient for many + applications since record boundaries are to respected. In such cases, the + application has to also implement a {@link RecordReader} on whom lies the + responsibility to respect record-boundaries and present a record-oriented + view of the logical InputSplit to the individual task. + + @see InputSplit + @see RecordReader + @see FileInputFormat]]> + + + + + + + + + + + + + + + + + + + + + + + + + SplitLocationInfos describing how the split + data is stored at each location. A null value indicates that all the + locations have the data stored on disk. + @throws IOException]]> + + + + InputSplit represents the data to be processed by an + individual {@link Mapper}. + +

    Typically, it presents a byte-oriented view on the input and is the + responsibility of {@link RecordReader} of the job to process this and present + a record-oriented view. + + @see InputFormat + @see RecordReader]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Job makes a copy of the Configuration so + that any necessary internal modifications do not reflect on the incoming + parameter. + + A Cluster will be created from the conf parameter only when it's needed. + + @param conf the configuration + @return the {@link Job} , with no connection to a cluster yet. + @throws IOException]]> + + + + + + + + Job makes a copy of the Configuration so + that any necessary internal modifications do not reflect on the incoming + parameter. + + @param conf the configuration + @return the {@link Job} , with no connection to a cluster yet. + @throws IOException]]> + + + + + + + + Job makes a copy of the Configuration so + that any necessary internal modifications do not reflect on the incoming + parameter. + + @param status job status + @param conf job configuration + @return the {@link Job} , with no connection to a cluster yet. + @throws IOException]]> + + + + + + + Job makes a copy of the Configuration so + that any necessary internal modifications do not reflect on the incoming + parameter. + + @param ignored + @return the {@link Job} , with no connection to a cluster yet. + @throws IOException + @deprecated Use {@link #getInstance()}]]> + + + + + + + + Job makes a copy of the Configuration so + that any necessary internal modifications do not reflect on the incoming + parameter. + + @param ignored + @param conf job configuration + @return the {@link Job} , with no connection to a cluster yet. + @throws IOException + @deprecated Use {@link #getInstance(Configuration)}]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + progress of the job's map-tasks, as a float between 0.0 + and 1.0. When all map tasks have completed, the function returns 1.0. + + @return the progress of the job's map-tasks. + @throws IOException]]> + + + + + + progress of the job's reduce-tasks, as a float between 0.0 + and 1.0. When all reduce tasks have completed, the function returns 1.0. + + @return the progress of the job's reduce-tasks. + @throws IOException]]> + + + + + + + progress of the job's cleanup-tasks, as a float between 0.0 + and 1.0. When all cleanup tasks have completed, the function returns 1.0. + + @return the progress of the job's cleanup-tasks. + @throws IOException]]> + + + + + + progress of the job's setup-tasks, as a float between 0.0 + and 1.0. When all setup tasks have completed, the function returns 1.0. + + @return the progress of the job's setup-tasks. + @throws IOException]]> + + + + + + true if the job is complete, else false. + @throws IOException]]> + + + + + + true if the job succeeded, else false. 
+ @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + InputFormat to use + @throws IllegalStateException if the job is submitted]]> + + + + + + + OutputFormat to use + @throws IllegalStateException if the job is submitted]]> + + + + + + + Mapper to use + @throws IllegalStateException if the job is submitted]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Reducer to use + @throws IllegalStateException if the job is submitted]]> + + + + + + + Partitioner to use + @throws IllegalStateException if the job is submitted]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if speculative execution + should be turned on, else false.]]> + + + + + + true if speculative execution + should be turned on for map tasks, + else false.]]> + + + + + + true if speculative execution + should be turned on for reduce tasks, + else false.]]> + + + + + + true, job-setup and job-cleanup will be + considered from {@link OutputCommitter} + else ignored.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + JobTracker is lost]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Job. + @throws IOException if fail to close.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + It allows the user to configure the + job, submit it, control its execution, and query the state. The set methods + only work until the job is submitted, afterwards they will throw an + IllegalStateException.

    + +

    + Normally the user creates the application, describes various facets of the job via {@link Job}, and then submits the job and monitors its progress.

    + +

    Here is an example of how to submit a job:

    +

    +     // Create a new Job
    +     Job job = Job.getInstance();
    +     job.setJarByClass(MyJob.class);
    +     
    +     // Specify various job-specific parameters     
    +     job.setJobName("myjob");
    +     
    +     job.setInputPath(new Path("in"));
    +     job.setOutputPath(new Path("out"));
    +     
    +     job.setMapperClass(MyJob.MyMapper.class);
    +     job.setReducerClass(MyJob.MyReducer.class);
    +
    +     // Submit the job, then poll for progress until the job is complete
    +     job.waitForCompletion(true);
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + 1. + @return the number of reduce tasks for this job.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mapred.map.max.attempts + property. If this property is not already set, the default is 4 attempts. + + @return the max number of attempts per map task.]]> + + + + + mapred.reduce.max.attempts + property. If this property is not already set, the default is 4 attempts. + + @return the max number of attempts per reduce task.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + An example JobID is : + job_200707121733_0003 , which represents the third job + running at the jobtracker started at 200707121733. +

    + Applications should never construct or parse JobID strings, but rather + use appropriate constructors or {@link #forName(String)} method. + + @see TaskID + @see TaskAttemptID]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the key input type to the Mapper + @param the value input type to the Mapper + @param the key output type from the Mapper + @param the value output type from the Mapper]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Maps are the individual tasks which transform input records into a + intermediate records. The transformed intermediate records need not be of + the same type as the input records. A given input pair may map to zero or + many output pairs.

    + +

    The Hadoop Map-Reduce framework spawns one map task for each + {@link InputSplit} generated by the {@link InputFormat} for the job. + Mapper implementations can access the {@link Configuration} for + the job via the {@link JobContext#getConfiguration()}. + +

    The framework first calls + {@link #setup(org.apache.hadoop.mapreduce.Mapper.Context)}, followed by + {@link #map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)} + for each key/value pair in the InputSplit. Finally + {@link #cleanup(org.apache.hadoop.mapreduce.Mapper.Context)} is called.

    + +

    All intermediate values associated with a given output key are + subsequently grouped by the framework, and passed to a {@link Reducer} to + determine the final output. Users can control the sorting and grouping by + specifying two key {@link RawComparator} classes.

    + +

    The Mapper outputs are partitioned per + Reducer. Users can control which keys (and hence records) go to + which Reducer by implementing a custom {@link Partitioner}. + +

    Users can optionally specify a combiner, via + {@link Job#setCombinerClass(Class)}, to perform local aggregation of the + intermediate outputs, which helps to cut down the amount of data transferred + from the Mapper to the Reducer. + +

    Applications can specify if and how the intermediate + outputs are to be compressed and which {@link CompressionCodec}s are to be + used via the Configuration.

    + +

    If the job has zero + reduces then the output of the Mapper is directly written + to the {@link OutputFormat} without sorting by keys.

    + +

    Example:

    +

    + public class TokenCounterMapper 
    +     extends Mapper<Object, Text, Text, IntWritable>{
    +    
    +   private final static IntWritable one = new IntWritable(1);
    +   private Text word = new Text();
    +   
    +   public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
    +     StringTokenizer itr = new StringTokenizer(value.toString());
    +     while (itr.hasMoreTokens()) {
    +       word.set(itr.nextToken());
    +       context.write(word, one);
    +     }
    +   }
    + }
    + 
    + +

    Applications may override the + {@link #run(org.apache.hadoop.mapreduce.Mapper.Context)} method to exert + greater control on map processing e.g. multi-threaded Mappers + etc.

    + + @see InputFormat + @see JobContext + @see Partitioner + @see Reducer]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + MarkableIterator is a wrapper iterator class that + implements the {@link MarkableIteratorInterface}.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if task output recovery is supported, + false otherwise + @see #recoverTask(TaskAttemptContext) + @deprecated Use {@link #isRecoverySupported(JobContext)} instead.]]> + + + + + + + true repeatable job commit is supported, + false otherwise + @throws IOException]]> + + + + + + + true if task output recovery is supported, + false otherwise + @throws IOException + @see #recoverTask(TaskAttemptContext)]]> + + + + + + + OutputCommitter. This is called from the application master + process, but it is called individually for each task. + + If an exception is thrown the task will be attempted again. + + This may be called multiple times for the same task. But from different + application attempts. + + @param taskContext Context of the task whose output is being recovered + @throws IOException]]> + + + + OutputCommitter describes the commit of task output for a + Map-Reduce job. + +

    The Map-Reduce framework relies on the OutputCommitter of + the job to:

    +

      +
    1. Set up the job during initialization. For example, create the temporary
       output directory for the job during job initialization.
    2. Clean up the job after job completion. For example, remove the
       temporary output directory after job completion.
    3. Set up the task temporary output.
    4. Check whether a task needs a commit, to avoid the commit
       procedure if the task does not need it.
    5. Commit the task output.
    6. Discard the task commit.
    + The methods in this class can be called from several different processes and + from several different contexts. It is important to know which process and + which context each is called from. Each method should be marked accordingly + in its documentation. It is also important to note that not all methods are + guaranteed to be called once and only once. If a method is not guaranteed to + have this property the output committer needs to handle this appropriately. + Also note it will only be in rare situations where they may be called + multiple times for the same task. + + @see org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter + @see JobContext + @see TaskAttemptContext]]> +
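    As a sketch of the shape of a concrete committer, a do-nothing implementation (real committers such as FileOutputCommitter perform actual work in these hooks):

       public class NoOpOutputCommitter extends OutputCommitter {
         public void setupJob(JobContext jobContext) throws IOException { }
         public void setupTask(TaskAttemptContext taskContext) throws IOException { }
         public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
           return false;   // nothing to promote into the final output
         }
         public void commitTask(TaskAttemptContext taskContext) throws IOException { }
         public void abortTask(TaskAttemptContext taskContext) throws IOException { }
       }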
    +
    + This is to validate the output specification for the job when the job is submitted. Typically it checks that the output does not already exist, throwing an exception when it does, so that output is not overwritten.

    + + @param context information about the job + @throws IOException when output should not be attempted]]> +
    +
    + + + + + + + + + + OutputFormat describes the output-specification for a + Map-Reduce job. + +

    The Map-Reduce framework relies on the OutputFormat of the + job to:

    +

      +
    1. Validate the output-specification of the job. For example, check that the
       output directory does not already exist.
    2. Provide the {@link RecordWriter} implementation to be used to write out
       the output files of the job. Output files are stored in a
       {@link FileSystem}.
    + + @see RecordWriter]]> +
    +
    + Typically a hash function on all or a subset of the key.

    + + @param key the key to be partitioned. + @param value the entry value. + @param numPartitions the total number of partitions. + @return the partition number for the key.]]> +
    +
    + + Partitioner controls the partitioning of the keys of the + intermediate map-outputs. The key (or a subset of the key) is used to derive + the partition, typically by a hash function. The total number of partitions + is the same as the number of reduce tasks for the job. Hence this controls + which of the m reduce tasks the intermediate key (and hence the + record) is sent for reduction.

    + +

    Note: A Partitioner is created only when there are multiple + reducers.

    + +

    Note: If you require your Partitioner class to obtain the Job's + configuration object, implement the {@link Configurable} interface.
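    A minimal custom partitioner sketch; HashKeyPartitioner is a hypothetical name:

       public class HashKeyPartitioner extends Partitioner<Text, IntWritable> {
         public int getPartition(Text key, IntWritable value, int numPartitions) {
           // Non-negative hash of the whole key, folded into the number of reduce tasks.
           return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
         }
       }

    It would be wired into a job with job.setPartitionerClass(HashKeyPartitioner.class).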

    + + @see Reducer]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + "N/A" + + @return Scheduling information associated to particular Job Queue]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @param ]]> + + + + + + + + + + + + + + + + + + + + + + RecordWriter to future operations. + + @param context the context of the task + @throws IOException]]> + + + + RecordWriter writes the output <key, value> pairs + to an output file. + +

    RecordWriter implementations write the job outputs to the + {@link FileSystem}. + + @see OutputFormat]]> + + + + + + + + + + + + + + + + + + + + + + the class of the input keys + @param the class of the input values + @param the class of the output keys + @param the class of the output values]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Reducer implementations + can access the {@link Configuration} for the job via the + {@link JobContext#getConfiguration()} method.

    + +

    Reducer has 3 primary phases:

    +
      +
    1. Shuffle

       The Reducer copies the sorted output from each
       {@link Mapper} using HTTP across the network.

    2. Sort

       The framework merge-sorts Reducer inputs by
       keys (since different Mappers may have output the same key).

       The shuffle and sort phases occur simultaneously, i.e. while outputs are
       being fetched they are merged.

       SecondarySort

       To achieve a secondary sort on the values returned by the value
       iterator, the application should extend the key with the secondary
       key and define a grouping comparator. The keys will be sorted using the
       entire key, but will be grouped using the grouping comparator to decide
       which keys and values are sent in the same call to reduce. The grouping
       comparator is specified via
       {@link Job#setGroupingComparatorClass(Class)}. The sort order is
       controlled by
       {@link Job#setSortComparatorClass(Class)}.

       For example, say that you want to find duplicate web pages and tag them
       all with the url of the "best" known example. You would set up the job
       like:
       • Map Input Key: url
       • Map Input Value: document
       • Map Output Key: document checksum, url pagerank
       • Map Output Value: url
       • Partitioner: by checksum
       • OutputKeyComparator: by checksum and then decreasing pagerank
       • OutputValueGroupingComparator: by checksum

    3. Reduce

       In this phase the
       {@link #reduce(Object, Iterable, org.apache.hadoop.mapreduce.Reducer.Context)}
       method is called for each <key, (collection of values)> in
       the sorted inputs.

       The output of the reduce task is typically written to a
       {@link RecordWriter} via
       {@link Context#write(Object, Object)}.
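    A sketch of how the secondary-sort job above could be wired up; ChecksumPartitioner, ChecksumPagerankComparator and ChecksumGroupingComparator stand for hypothetical application-provided classes:

       job.setPartitionerClass(ChecksumPartitioner.class);                 // partition by checksum
       job.setSortComparatorClass(ChecksumPagerankComparator.class);       // checksum, then decreasing pagerank
       job.setGroupingComparatorClass(ChecksumGroupingComparator.class);   // group by checksum only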
    + +

    The output of the Reducer is not re-sorted.

    + +

    Example:

    +

    + public class IntSumReducer<Key> extends Reducer<Key,IntWritable,
    +                                                 Key,IntWritable> {
    +   private IntWritable result = new IntWritable();
    + 
    +   public void reduce(Key key, Iterable<IntWritable> values,
    +                      Context context) throws IOException, InterruptedException {
    +     int sum = 0;
    +     for (IntWritable val : values) {
    +       sum += val.get();
    +     }
    +     result.set(sum);
    +     context.write(key, result);
    +   }
    + }
    + 
    + + @see Mapper + @see Partitioner]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + counterName. + @param counterName counter name + @return the Counter for the given counterName]]> + + + + + + + groupName and + counterName. + @param counterName counter name + @return the Counter for the given groupName and + counterName]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + An example TaskAttemptID is : + attempt_200707121733_0003_m_000005_0 , which represents the + zeroth task attempt for the fifth map task in the third job + running at the jobtracker started at 200707121733. +

    + Applications should never construct or parse TaskAttemptID strings + , but rather use appropriate constructors or {@link #forName(String)} + method. + + @see JobID + @see TaskID]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + An example TaskID is : + task_200707121733_0003_m_000005 , which represents the + fifth map task in the third job running at the jobtracker + started at 200707121733. +

    + Applications should never construct or parse TaskID strings + , but rather use appropriate constructors or {@link #forName(String)} + method. + + @see JobID + @see TaskAttemptID]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OutputCommitter for the task-attempt]]> + + + + the input key type for the task + @param the input value type for the task + @param the output key type for the task + @param the output value type for the task]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + type of the other counter + @param type of the other counter group + @param counters the counters object to copy + @param groupFactory the factory for new groups]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + type of counter inside the counters + @param type of group inside the counters]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + type of the counter for the group]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The key and values are passed from one element of the chain to the next, by + value. For the added Mapper the configuration given for it, + mapperConf, have precedence over the job's Configuration. This + precedence is in effect when the task is running. +

    +

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainMapper, this is done by the addMapper for the last mapper in the chain +

    + + @param job + The job. + @param klass + the Mapper class to add. + @param inputKeyClass + mapper input key class. + @param inputValueClass + mapper input value class. + @param outputKeyClass + mapper output key class. + @param outputValueClass + mapper output value class. + @param mapperConf + a configuration for the Mapper class. It is recommended to use a + Configuration without default values using the + Configuration(boolean loadDefaults) constructor with + FALSE.]]> +
    + + + + + + + + + + + + The Mapper classes are invoked in a chained (or piped) fashion, the output of + the first becomes the input of the second, and so on until the last Mapper, + the output of the last Mapper will be written to the task's output. +

    +

    + The key functionality of this feature is that the Mappers in the chain do not + need to be aware that they are executed in a chain. This enables having + reusable specialized Mappers that can be combined to perform composite + operations within a single task. +

    +

    + Special care has to be taken when creating chains that the key/values output + by a Mapper are valid for the following Mapper in the chain. It is assumed + all Mappers and the Reduce in the chain use matching output and input key and + value classes as no conversion is done by the chaining code. +

    +

    + Using the ChainMapper and the ChainReducer classes it is possible to compose Map/Reduce jobs that look like [MAP+ / REDUCE MAP*]. An immediate benefit of this pattern is a dramatic reduction in disk IO.

    +

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainMapper, this is done by the addMapper for the last mapper in the chain. +

    + ChainMapper usage pattern: +

    + +

    + ...
    + Job job = new Job(conf);
    +
    + Configuration mapAConf = new Configuration(false);
    + ...
    + ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
    +   Text.class, Text.class, true, mapAConf);
    +
    + Configuration mapBConf = new Configuration(false);
    + ...
    + ChainMapper.addMapper(job, BMap.class, Text.class, Text.class,
    +   LongWritable.class, Text.class, false, mapBConf);
    +
    + ...
    +
    + job.waitForCompletion(true);
    + ...
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + The key and values are passed from one element of the chain to the next, by + value. For the added Reducer the configuration given for it, + reducerConf, have precedence over the job's Configuration. + This precedence is in effect when the task is running. +

    +

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainReducer, this is done by the setReducer or the addMapper for the last + element in the chain. +

    + + @param job + the job + @param klass + the Reducer class to add. + @param inputKeyClass + reducer input key class. + @param inputValueClass + reducer input value class. + @param outputKeyClass + reducer output key class. + @param outputValueClass + reducer output value class. + @param reducerConf + a configuration for the Reducer class. It is recommended to use a + Configuration without default values using the + Configuration(boolean loadDefaults) constructor with + FALSE.]]> +
    +
    + + + + + + + + + + + + The key and values are passed from one element of the chain to the next, by + value For the added Mapper the configuration given for it, + mapperConf, have precedence over the job's Configuration. This + precedence is in effect when the task is running. +

    +

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainMapper, this is done by the addMapper for the last mapper in the + chain. +

    + + @param job + The job. + @param klass + the Mapper class to add. + @param inputKeyClass + mapper input key class. + @param inputValueClass + mapper input value class. + @param outputKeyClass + mapper output key class. + @param outputValueClass + mapper output value class. + @param mapperConf + a configuration for the Mapper class. It is recommended to use a + Configuration without default values using the + Configuration(boolean loadDefaults) constructor with + FALSE.]]> +
    +
    + + + + + + + + + + + For each record output by the Reducer, the Mapper classes are invoked in a + chained (or piped) fashion. The output of the reducer becomes the input of + the first mapper and output of first becomes the input of the second, and so + on until the last Mapper, the output of the last Mapper will be written to + the task's output. +

    +

    + The key functionality of this feature is that the Mappers in the chain do not + need to be aware that they are executed after the Reducer or in a chain. This + enables having reusable specialized Mappers that can be combined to perform + composite operations within a single task. +

    +

    + Special care has to be taken when creating chains that the key/values output + by a Mapper are valid for the following Mapper in the chain. It is assumed + all Mappers and the Reduce in the chain use matching output and input key and + value classes as no conversion is done by the chaining code. +

    +

    Using the ChainMapper and the ChainReducer classes it is possible to compose Map/Reduce jobs that look like [MAP+ / REDUCE MAP*]. An immediate benefit of this pattern is a dramatic reduction in disk IO.

    +

    + IMPORTANT: There is no need to specify the output key/value classes for the + ChainReducer, this is done by the setReducer or the addMapper for the last + element in the chain. +

    + ChainReducer usage pattern: +

    + +

    + ...
    + Job job = new Job(conf);
    + ....
    +
    + Configuration reduceConf = new Configuration(false);
    + ...
    + ChainReducer.setReducer(job, XReduce.class, LongWritable.class, Text.class,
    +   Text.class, Text.class, true, reduceConf);
    +
    + ChainReducer.addMapper(job, CMap.class, Text.class, Text.class,
    +   LongWritable.class, Text.class, false, null);
    +
    + ChainReducer.addMapper(job, DMap.class, LongWritable.class, Text.class,
    +   LongWritable.class, LongWritable.class, true, null);
    +
    + ...
    +
    + job.waitForCompletion(true);
    + ...
    + 
    ]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DBInputFormat emits LongWritables containing the record number as + key and DBWritables as value. + + The SQL query, and input class can be using one of the two + setInput methods.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {@link DBOutputFormat} accepts <key,value> pairs, where + key has a type extending DBWritable. Returned {@link RecordWriter} + writes only the key to the database with a batch SQL query.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + DBWritable. DBWritable, is similar to {@link Writable} + except that the {@link #write(PreparedStatement)} method takes a + {@link PreparedStatement}, and {@link #readFields(ResultSet)} + takes a {@link ResultSet}. +

    + Implementations are responsible for writing the fields of the object + to PreparedStatement, and reading the fields of the object from the + ResultSet. + +

    Example:

    + If we have the following table in the database : +
    + CREATE TABLE MyTable (
    +   counter        INTEGER NOT NULL,
+   timestamp      BIGINT  NOT NULL
+ );
    + 
    + then we can read/write the tuples from/to the table with : +

    + public class MyWritable implements Writable, DBWritable {
    +   // Some data     
    +   private int counter;
    +   private long timestamp;
    +       
    +   //Writable#write() implementation
    +   public void write(DataOutput out) throws IOException {
    +     out.writeInt(counter);
    +     out.writeLong(timestamp);
    +   }
    +       
    +   //Writable#readFields() implementation
    +   public void readFields(DataInput in) throws IOException {
    +     counter = in.readInt();
    +     timestamp = in.readLong();
    +   }
    +       
    +   public void write(PreparedStatement statement) throws SQLException {
    +     statement.setInt(1, counter);
    +     statement.setLong(2, timestamp);
    +   }
    +       
    +   public void readFields(ResultSet resultSet) throws SQLException {
    +     counter = resultSet.getInt(1);
    +     timestamp = resultSet.getLong(2);
    +   } 
    + }
    + 
    ]]> +
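As a complement to the MyWritable example above, here is a minimal, hypothetical sketch of wiring it into a job with DBInputFormat/DBOutputFormat; the JDBC driver class, connection URL, credentials and table layout below are placeholders, not values taken from this document:

   // Placeholder connection settings; substitute your own driver/URL/credentials.
   DBConfiguration.configureDB(job.getConfiguration(),
       "com.mysql.jdbc.Driver", "jdbc:mysql://localhost/mydb", "user", "password");

   // Read MyTable ordered by counter; the job receives <LongWritable, MyWritable> pairs.
   DBInputFormat.setInput(job, MyWritable.class,
       "MyTable", null, "counter", "counter", "timestamp");

   // Alternatively, write MyWritable keys back to MyTable with a batched SQL statement.
   DBOutputFormat.setOutput(job, "MyTable", "counter", "timestamp");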
    +
+ RecordReader's for CombineFileSplit's. @see CombineFileSplit]]>

+ CombineFileRecordReader. Subclassing is needed to get a concrete record reader wrapper because of the constructor requirement. @see CombineFileRecordReader @see CombineFileInputFormat]]>

+ CombineFileSplit can be used to implement {@link RecordReader}'s, with reading one record per file. @see FileSplit @see CombineFileInputFormat]]>

+ CombineFileInputFormat-equivalent for SequenceFileInputFormat. @see CombineFileInputFormat]]>

+ CombineFileInputFormat-equivalent for TextInputFormat. @see CombineFileInputFormat]]>

+ FileInputFormat always returns true. Implementations that may deal with non-splittable files must override this method. FileInputFormat implementations can override this and return false to ensure that individual input files are never split-up so that {@link Mapper}s process entire files. @param context the job context @param filename the file name to check @return is this file splitable?]]>

+ FileInputFormat is the base class for all file-based InputFormats. This provides a generic implementation of {@link #getSplits(JobContext)}. Implementations of FileInputFormat can also override the {@link #isSplitable(JobContext, Path)} method to prevent input files from being split-up in certain situations. Implementations that may deal with non-splittable files must override this method, since the default implementation assumes splitting is always possible.]]>
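The isSplitable override described above can be sketched as follows; WholeFileTextInputFormat is an illustrative class name, not an existing Hadoop class:

   public class WholeFileTextInputFormat extends TextInputFormat {
     @Override
     protected boolean isSplitable(JobContext context, Path file) {
       // Never split the input: each file is processed by exactly one Mapper.
       return false;
     }
   }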
The record length can be set either via FixedLengthInputFormat.setRecordLength(conf, recordLength) or conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength); +

    + @see FixedLengthRecordReader]]> +
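A short configuration sketch, assuming recordLength holds the fixed record size in bytes and conf is the job's Configuration:

   Job job = Job.getInstance(conf, "fixed-length-read");   // job name is illustrative
   // Equivalent to conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength)
   FixedLengthInputFormat.setRecordLength(job.getConfiguration(), recordLength);
   job.setInputFormatClass(FixedLengthInputFormat.class);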
    +
+ true if the Job was added.]]>

+ ([,]*)
+ func ::= tbl(,"")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the mapreduce.join.expr property and user-supplied join types from mapreduce.join.define.<ident> types. Paths supplied to tbl are given as input paths to the InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>

+ (tbl(,),tbl(,),...,tbl(,)) }]]>

+ mapreduce.join.define.<ident> to a classname. In the expression mapreduce.join.expr, the identifier will be assumed to be a ComposableRecordReader. mapreduce.join.keycomparator can be a classname used to compare keys in the join. @see #setFormat @see JoinRecordReader @see MultiFilterRecordReader]]>

+ capacity children to position id in the parent reader. The id of a root CompositeRecordReader is -1 by convention, but relying on this is not recommended.]]>

+ override(S1,S2,S3) will prefer values from S3 over S2, and values from S2 over S1 for all keys emitted from all sources.]]>

+ [<child1>,<child2>,...,<childn>]]]>

+ out. TupleWritable format: {@code ...... }]]>

+ the map's input key type @param the map's input value type @param the map's output key type @param the map's output value type @param job the job @return the mapper class to run]]>

+ the map input key type @param the map input value type @param the map output key type @param the map output value type @param job the job to modify @param cls the class to use as the mapper]]>

+ It can be used instead of the default implementation, {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU bound in order to improve throughput. +

    + Mapper implementations using this MapRunnable must be thread-safe. +

+ The Map-Reduce job has to be configured with the mapper to use via {@link #setMapperClass(Job, Class)} and the number of threads the thread-pool can use via the {@link #setNumberOfThreads(Job, int)} method. The default value is 10 threads. +
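A minimal configuration sketch; WordMapper stands in for the application's own thread-safe Mapper implementation, and conf is assumed to be the job's Configuration:

   Job job = Job.getInstance(conf, "multithreaded-map");   // job name is illustrative
   job.setMapperClass(MultithreadedMapper.class);
   // WordMapper is a placeholder for a thread-safe application Mapper.
   MultithreadedMapper.setMapperClass(job, WordMapper.class);
   MultithreadedMapper.setNumberOfThreads(job, 10);         // 10 is also the default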

]]>

+ MapContext to be wrapped @return a wrapped Mapper.Context for custom implementations]]>

  • + In applications which take a classname of committer in + a configuration option, set it to the canonical name of this class + (see {@link #NAME}). When this class is instantiated, it will + use the factory mechanism to locate the configured committer for the + destination. +
  • +
  • + In code, explicitly create an instance of this committer through + its constructor, then invoke commit lifecycle operations on it. + The dynamically configured committer will be created in the constructor + and have the lifecycle operations relayed to it. +
  • + ]]> +
    +
+ true if the job output should be compressed, false otherwise]]>

+ Tasks' Side-Effect Files +

    Some applications need to create/write-to side-files, which differ from + the actual job-outputs. + +

    In such cases there could be issues with 2 instances of the same TIP + (running simultaneously e.g. speculative tasks) trying to open/write-to the + same file (path) on HDFS. Hence the application-writer will have to pick + unique names per task-attempt (e.g. using the attemptid, say + attempt_200709221812_0001_m_000000_0), not just per TIP.

    + +

    To get around this the Map-Reduce framework helps the application-writer + out by maintaining a special + ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} + sub-directory for each task-attempt on HDFS where the output of the + task-attempt goes. On successful completion of the task-attempt the files + in the ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} (only) + are promoted to ${mapreduce.output.fileoutputformat.outputdir}. Of course, the + framework discards the sub-directory of unsuccessful task-attempts. This + is completely transparent to the application.

    + +

The application-writer can take advantage of this by creating any side-files required in a work directory during execution of the task, i.e. via {@link #getWorkOutputPath(TaskInputOutputContext)}, and the framework will move them out similarly - thus the application-writer doesn't have to pick unique paths per task-attempt.

    + +

    The entire discussion holds true for maps of jobs with + reducer=NONE (i.e. 0 reduces) since output of the map, in that case, + goes directly to HDFS.

    + + @return the {@link Path} to the task's temporary output directory + for the map-reduce job.]]> +
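For illustration, a hedged sketch of creating such a side-file from inside a map or reduce method (so that context is the task's Context); the file name side-data.bin is made up for the example:

   Path workDir = FileOutputFormat.getWorkOutputPath(context);
   Path sideFile = new Path(workDir, "side-data.bin");
   FileSystem fs = sideFile.getFileSystem(context.getConfiguration());
   try (FSDataOutputStream out = fs.create(sideFile)) {
     // Written under _temporary/_${taskid}; promoted with the task output on commit.
     out.writeUTF("side output");
   }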
    +
    + + + + + + + + The path can be used to create custom files from within the map and + reduce tasks. The path name will be unique for each task. The path parent + will be the job output directory.

+ +

    This method uses the {@link #getUniqueFile} method to make the file name + unique for the task.

+ + @param context the context for the task. + @param name the name for the file. + @param extension the extension for the file + @return a unique path across all tasks of the job.]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Warning: when the baseOutputPath is a path that resolves outside of the final job output directory, the directory is created immediately and then persists through subsequent task retries, breaking the concept of output committing.]]>

+ super.close() at the end of their close()]]>

+ Case one: writing to additional outputs other than the job default output.

+ Each additional output, or named output, may be configured with its own OutputFormat, with its own key class and with its own value class. +

    + +

    + Case two: to write data to different files provided by user +

    + +

+ MultipleOutputs supports counters; by default they are disabled. The counters group is the {@link MultipleOutputs} class name. The names of the counters are the same as the output name. These count the number of records written to each output name. +
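Enabling them is a one-line toggle, sketched here on the assumption that job is the Job being configured:

   // Enable per-named-output record counters (they are off by default).
   MultipleOutputs.setCountersEnabled(job, true);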

    + + Usage pattern for job submission: +
    +
    + Job job = new Job();
    +
    + FileInputFormat.setInputPath(job, inDir);
    + FileOutputFormat.setOutputPath(job, outDir);
    +
    + job.setMapperClass(MOMap.class);
    + job.setReducerClass(MOReduce.class);
    + ...
    +
    + // Defines additional single text based output 'text' for the job
    + MultipleOutputs.addNamedOutput(job, "text", TextOutputFormat.class,
    + LongWritable.class, Text.class);
    +
    + // Defines additional sequence-file based output 'sequence' for the job
    + MultipleOutputs.addNamedOutput(job, "seq",
    +   SequenceFileOutputFormat.class,
    +   LongWritable.class, Text.class);
    + ...
    +
    + job.waitForCompletion(true);
    + ...
    + 
    +

    + Usage in Reducer: +

    + <K, V> String generateFileName(K k, V v) {
    +   return k.toString() + "_" + v.toString();
    + }
    + 
    + public class MOReduce extends
    +   Reducer<WritableComparable, Writable,WritableComparable, Writable> {
    + private MultipleOutputs mos;
    + public void setup(Context context) {
    + ...
    + mos = new MultipleOutputs(context);
    + }
    +
    + public void reduce(WritableComparable key, Iterator<Writable> values,
    + Context context)
    + throws IOException {
    + ...
    + mos.write("text", , key, new Text("Hello"));
    + mos.write("seq", LongWritable(1), new Text("Bye"), "seq_a");
    + mos.write("seq", LongWritable(2), key, new Text("Chau"), "seq_b");
    + mos.write(key, new Text("value"), generateFileName(key, new Text("value")));
    + ...
    + }
    +
+ public void cleanup(Context context) throws IOException {
    + mos.close();
    + ...
    + }
    +
    + }
    + 
    + +

+ When used in conjunction with org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat, MultipleOutputs can mimic the behaviour of MultipleTextOutputFormat and MultipleSequenceFileOutputFormat from the old Hadoop API - i.e., output can be written from the Reducer to more than one location. +

    + +

    + Use MultipleOutputs.write(KEYOUT key, VALUEOUT value, String baseOutputPath) to write key and + value to a path specified by baseOutputPath, with no need to specify a named output. + Warning: when the baseOutputPath passed to MultipleOutputs.write + is a path that resolves outside of the final job output directory, the + directory is created immediately and then persists through subsequent + task retries, breaking the concept of output committing: +

    + +
    + private MultipleOutputs<Text, Text> out;
    + 
    + public void setup(Context context) {
    +   out = new MultipleOutputs<Text, Text>(context);
    +   ...
    + }
    + 
    + public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
    + for (Text t : values) {
    +   out.write(key, t, generateFileName(<parameter list...>));
    +   }
    + }
    + 
    + protected void cleanup(Context context) throws IOException, InterruptedException {
    +   out.close();
    + }
    + 
    + +

    + Use your own code in generateFileName() to create a custom path to your results. + '/' characters in baseOutputPath will be translated into directory levels in your file system. + Also, append your custom-generated path with "part" or similar, otherwise your output will be -00000, -00001 etc. + No call to context.write() is necessary. See example generateFileName() code below. +

    + +
    + private String generateFileName(Text k) {
    +   // expect Text k in format "Surname|Forename"
    +   String[] kStr = k.toString().split("\\|");
    +   
    +   String sName = kStr[0];
    +   String fName = kStr[1];
    +
    +   // example for k = Smith|John
    +   // output written to /user/hadoop/path/to/output/Smith/John-r-00000 (etc)
    +   return sName + "/" + fName;
    + }
    + 
    + +

    + Using MultipleOutputs in this way will still create zero-sized default output, eg part-00000. + To prevent this use LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class); + instead of job.setOutputFormatClass(TextOutputFormat.class); in your Hadoop job configuration. +

    ]]> +
    +
+ This allows the user to specify the key class to be different from the actual class ({@link BytesWritable}) used for writing

    + + @param job the {@link Job} to modify + @param theClass the SequenceFile output key class.]]> +
    +
    + + + + + This allows the user to specify the value class to be different + from the actual class ({@link BytesWritable}) used for writing

+ + @param job the {@link Job} to modify + @param theClass the SequenceFile output value class.]]> +
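For example, a sketch that records IntWritable/Text as the logical key/value classes while the format itself writes raw BytesWritable data (the class choices are illustrative, and job is assumed to be the Job being configured):

   job.setOutputFormatClass(SequenceFileAsBinaryOutputFormat.class);
   // Declare the logical key/value types even though BytesWritable bytes are written.
   SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class);
   SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, Text.class);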
    +
+ bytes[left:(right+1)] in Python syntax. @param conf configuration object @param left left Python-style offset @param right right Python-style offset]]>

+ bytes[offset:] in Python syntax. @param conf configuration object @param offset left Python-style offset]]>

+ bytes[:(offset+1)] in Python syntax. @param conf configuration object @param offset right Python-style offset]]>

+ Partition {@link BinaryComparable} keys using a configurable part of the bytes array returned by {@link BinaryComparable#getBytes()}.

    + +

    The subarray to be used for the partitioning can be defined by means + of the following properties: +

      +
    • + mapreduce.partition.binarypartitioner.left.offset: + left offset in array (0 by default) +
    • +
    • + mapreduce.partition.binarypartitioner.right.offset: + right offset in array (-1 by default) +
    • +
    + Like in Python, both negative and positive offsets are allowed, but + the meaning is slightly different. In case of an array of length 5, + for instance, the possible offsets are: +
    
    +  +---+---+---+---+---+
    +  | B | B | B | B | B |
    +  +---+---+---+---+---+
    +    0   1   2   3   4
    +   -5  -4  -3  -2  -1
    + 
+ The first row of numbers gives the position of the offsets 0...4 in the array; the second row gives the corresponding negative offsets. Contrary to Python, the specified subarray has byte i and j as first and last element, respectively, when i and j are the left and right offset. +

    For Hadoop programs written in Java, it is advisable to use one of + the following static convenience methods for setting the offsets: +

      +
    • {@link #setOffsets}
    • +
    • {@link #setLeftOffset}
    • +
    • {@link #setRightOffset}
    • +
    ]]> +
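For example, a sketch that partitions on every byte of the key except the last one, using the Python-style offsets described above (job is assumed to be the Job being configured):

   job.setPartitionerClass(BinaryPartitioner.class);
   // Use bytes[0:-1] of each key, i.e. drop the last byte when computing the partition.
   BinaryPartitioner.setOffsets(job.getConfiguration(), 0, -2);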
    +
+ total.order.partitioner.natural.order is not false, a trie of the first total.order.partitioner.max.trie.depth(2) + 1 bytes will be built. Otherwise, keys will be located using a binary search of the partition keyset using the {@link org.apache.hadoop.io.RawComparator} defined for this job. The input file must be sorted with the same comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.]]>

+ R reduces, there are R-1 keys in the SequenceFile.]]>
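A typical setup samples the input to produce the partition file; the path, sampling parameters and outputDir variable below are illustrative placeholders:

   job.setPartitionerClass(TotalOrderPartitioner.class);
   Path partitionFile = new Path(outputDir, "_partitions");   // placeholder location
   TotalOrderPartitioner.setPartitionFile(job.getConfiguration(), partitionFile);
   // Sample the input and write getNumReduceTasks() - 1 split keys, sorted with the job's comparator.
   InputSampler.writePartitionFile(job,
       new InputSampler.RandomSampler<>(0.01, 10000));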
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ReduceContext to be wrapped + @return a wrapped Reducer.Context for custom implementations]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.2.2.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.2.2.xml new file mode 100644 index 0000000000000..2b304e9836640 --- /dev/null +++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.2.2.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index 1ae481d283ba7..c07df48ad3401 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-app - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce App diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index fed500a4295cb..26a648fedb464 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -35,7 +35,7 @@ import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FSError; import org.apache.hadoop.fs.FileContext; @@ -67,7 +67,7 @@ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java index b04dac5f0a169..c80ead4a46d78 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID; import 
org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; @@ -58,11 +59,12 @@ import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is responsible for talking to the task umblical. @@ -94,6 +96,12 @@ public class TaskAttemptListenerImpl extends CompositeService AtomicReference> attemptIdToStatus = new ConcurrentHashMap<>(); + /** + * A Map to keep track of the history of logging each task attempt. + */ + private ConcurrentHashMap + taskAttemptLogProgressStamps = new ConcurrentHashMap<>(); + private Set launchedJVMs = Collections .newSetFromMap(new ConcurrentHashMap()); @@ -123,10 +131,12 @@ public TaskAttemptListenerImpl(AppContext context, @Override protected void serviceInit(Configuration conf) throws Exception { - registerHeartbeatHandler(conf); - commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS, - MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS); - super.serviceInit(conf); + registerHeartbeatHandler(conf); + commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS, + MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS); + // initialize the delta threshold for logging the task progress. + MRJobConfUtil.setTaskLogProgressDeltaThresholds(conf); + super.serviceInit(conf); } @Override @@ -410,8 +420,10 @@ public AMFeedback statusUpdate(TaskAttemptID taskAttemptID, taskAttemptStatus.id = yarnAttemptID; // Task sends the updated progress to the TT. taskAttemptStatus.progress = taskStatus.getProgress(); - LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : " - + taskStatus.getProgress()); + // log the new progress + taskAttemptLogProgressStamps.computeIfAbsent(taskAttemptID, + k -> new TaskProgressLogPair(taskAttemptID)) + .update(taskStatus.getProgress()); // Task sends the updated state-string to the TT. taskAttemptStatus.stateString = taskStatus.getStateString(); // Task sends the updated phase to the TT. @@ -637,4 +649,68 @@ private void coalesceStatusUpdate(TaskAttemptId yarnAttemptID, AtomicReference> getAttemptIdToStatus() { return attemptIdToStatus; } + + /** + * Entity to keep track of the taskAttempt, last time it was logged, + * and the + * progress that has been logged. + */ + class TaskProgressLogPair { + + /** + * The taskAttemptId of that history record. + */ + private final TaskAttemptID taskAttemptID; + /** + * Timestamp of last time the progress was logged. + */ + private volatile long logTimeStamp; + /** + * Snapshot of the last logged progress. 
+ */ + private volatile double prevProgress; + + TaskProgressLogPair(final TaskAttemptID attemptID) { + taskAttemptID = attemptID; + prevProgress = 0.0; + logTimeStamp = 0; + } + + private void resetLog(final boolean doLog, + final float progress, final double processedProgress, + final long timestamp) { + if (doLog) { + prevProgress = processedProgress; + logTimeStamp = timestamp; + LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : " + + progress); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Progress of TaskAttempt " + taskAttemptID + " is : " + + progress); + } + } + } + + public void update(final float progress) { + final double processedProgress = + MRJobConfUtil.convertTaskProgressToFactor(progress); + final double diffProgress = processedProgress - prevProgress; + final long currentTime = Time.monotonicNow(); + boolean result = + (Double.compare(diffProgress, + MRJobConfUtil.getTaskProgressMinDeltaThreshold()) >= 0); + if (!result) { + // check if time has expired. + result = ((currentTime - logTimeStamp) + >= MRJobConfUtil.getTaskProgressWaitDeltaTimeThreshold()); + } + // It is helpful to log the progress when it reaches 1.0F. + if (Float.compare(progress, 1.0f) == 0) { + result = true; + taskAttemptLogProgressStamps.remove(taskAttemptID); + } + resetLog(result, progress, processedProgress, currentTime); + } + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index e81b090a67336..dd0544fd3e35f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -63,7 +63,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The main() for MapReduce task processes. 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index c7e69af8a8140..fc82a1cc34fa1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -79,7 +79,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.TimelineServiceHelper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.ClientHandlerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java index 3bf05420f0ca6..ed49f82506bc6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java @@ -25,9 +25,11 @@ import java.net.Proxy; import java.net.URL; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobContext; +import org.apache.hadoop.mapreduce.CustomJobEndNotifier; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.eclipse.jetty.util.log.Log; @@ -57,6 +59,9 @@ public class JobEndNotifier implements Configurable { protected int timeout; // Timeout (ms) on the connection and notification protected URL urlToNotify; //URL to notify read from the config protected Proxy proxyToUse = Proxy.NO_PROXY; //Proxy to use for notification + // A custom notifier implementation + // (see org.apache.hadoop.mapreduce.CustomJobEndNotifier) + private String customJobEndNotifierClassName; /** * Parse the URL that needs to be notified of the end of the job, along @@ -84,6 +89,9 @@ public void setConf(Configuration conf) { proxyConf = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY); + customJobEndNotifierClassName = StringUtils.stripToNull( + conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS)); + //Configure the proxy to use if its set. It should be set like //proxyType@proxyHostname:port if(proxyConf != null && !proxyConf.equals("") && @@ -115,11 +123,22 @@ public void setConf(Configuration conf) { public Configuration getConf() { return conf; } - + /** * Notify the URL just once. Use best effort. */ protected boolean notifyURLOnce() { + if (customJobEndNotifierClassName == null) { + return notifyViaBuiltInNotifier(); + } else { + return notifyViaCustomNotifier(); + } + } + + /** + * Uses a simple HttpURLConnection to do the Job end notification. 
+ */ + private boolean notifyViaBuiltInNotifier() { boolean success = false; try { Log.getLog().info("Job end notification trying " + urlToNotify); @@ -145,6 +164,36 @@ protected boolean notifyURLOnce() { return success; } + /** + * Uses the custom Job end notifier class to do the Job end notification. + */ + private boolean notifyViaCustomNotifier() { + try { + Log.getLog().info("Will be using " + customJobEndNotifierClassName + + " for Job end notification"); + + final Class customJobEndNotifierClass = + Class.forName(customJobEndNotifierClassName) + .asSubclass(CustomJobEndNotifier.class); + final CustomJobEndNotifier customJobEndNotifier = + customJobEndNotifierClass.getDeclaredConstructor().newInstance(); + + boolean success = customJobEndNotifier.notifyOnce(urlToNotify, conf); + if (success) { + Log.getLog().info("Job end notification to " + urlToNotify + + " succeeded"); + } else { + Log.getLog().warn("Job end notification to " + urlToNotify + + " failed"); + } + return success; + } catch (Exception e) { + Log.getLog().warn("Job end notification to " + urlToNotify + + " failed", e); + return false; + } + } + /** * Notify a server of the completion of a submitted job. The user must have * configured MRJobConfig.MR_JOB_END_NOTIFICATION_URL diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index def987207a22e..dfde6b42274cb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -154,7 +154,7 @@ import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java index 9439a7be8d61c..37a1d49153c58 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java @@ -24,7 +24,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.util.MRJobConfUtil; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java index 849c9144913bc..c72e13e648e15 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java @@ -51,7 +51,7 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index d2e2492be6ce8..5ef12509ad75b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -130,8 +130,8 @@ import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.Clock; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1423,7 +1423,8 @@ public static String escapeString(String data) { * be set up to false. In that way, the NMs that host the task containers * won't try to upload the resources to shared cache. 
*/ - private static void cleanupSharedCacheUploadPolicies(Configuration conf) { + @VisibleForTesting + static void cleanupSharedCacheUploadPolicies(Configuration conf) { Job.setArchiveSharedCacheUploadPolicies(conf, Collections.emptyMap()); Job.setFileSharedCacheUploadPolicies(conf, Collections.emptyMap()); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index 0f51fef58fbe9..3943a3aa913f9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -145,8 +145,8 @@ import org.apache.hadoop.yarn.util.UnitsConversionUtil; import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -158,6 +158,9 @@ public abstract class TaskAttemptImpl implements org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt, EventHandler { + @VisibleForTesting + protected final static Map RESOURCE_REQUEST_CACHE + = new HashMap<>(); static final Counters EMPTY_COUNTERS = new Counters(); private static final Logger LOG = LoggerFactory.getLogger(TaskAttemptImpl.class); @@ -172,7 +175,7 @@ public abstract class TaskAttemptImpl implements private final Clock clock; private final org.apache.hadoop.mapred.JobID oldJobId; private final TaskAttemptListener taskAttemptListener; - private final Resource resourceCapability; + private Resource resourceCapability; protected Set dataLocalHosts; protected Set dataLocalRacks; private final List diagnostics = new ArrayList(); @@ -707,6 +710,10 @@ private void populateResourceCapability(TaskType taskType) { getResourceTypePrefix(taskType); boolean memorySet = false; boolean cpuVcoresSet = false; + if (RESOURCE_REQUEST_CACHE.get(taskType) != null) { + resourceCapability = RESOURCE_REQUEST_CACHE.get(taskType); + return; + } if (resourceTypePrefix != null) { List resourceRequests = ResourceUtils.getRequestedResourcesFromConfig(conf, @@ -767,6 +774,9 @@ private void populateResourceCapability(TaskType taskType) { if (!cpuVcoresSet) { this.resourceCapability.setVirtualCores(getCpuRequired(conf, taskType)); } + RESOURCE_REQUEST_CACHE.put(taskType, resourceCapability); + LOG.info("Resource capability of task type {} is set to {}", + taskType, resourceCapability); } private String getCpuVcoresKey(TaskType taskType) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java index ce3b3cc59639f..6b95bf48c82db 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java @@ -36,6 +36,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; @@ -94,7 +95,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Implementation of Task interface. @@ -142,7 +143,8 @@ public abstract class TaskImpl implements Task, EventHandler { private boolean historyTaskStartGenerated = false; // Launch time reported in history events. private long launchTime; - + private boolean speculationEnabled = false; + private static final SingleArcTransition ATTEMPT_KILLED_TRANSITION = new AttemptKilledTransition(); private static final SingleArcTransition @@ -325,6 +327,9 @@ public TaskImpl(JobId jobId, TaskType taskType, int partition, this.appContext = appContext; this.encryptedShuffle = conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT); + this.speculationEnabled = taskType.equals(TaskType.MAP) ? + conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false) : + conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false); // This "this leak" is okay because the retained pointer is in an // instance variable. @@ -1079,13 +1084,19 @@ public TaskStateInternal transition(TaskImpl task, TaskEvent event) { if (task.successfulAttempt == null) { boolean shouldAddNewAttempt = true; if (task.inProgressAttempts.size() > 0) { - // if not all of the inProgressAttempts are hanging for resource - for (TaskAttemptId attemptId : task.inProgressAttempts) { - if (((TaskAttemptImpl) task.getAttempt(attemptId)) - .isContainerAssigned()) { - shouldAddNewAttempt = false; - break; + if(task.speculationEnabled) { + // if not all of the inProgressAttempts are hanging for resource + for (TaskAttemptId attemptId : task.inProgressAttempts) { + if (((TaskAttemptImpl) task.getAttempt(attemptId)) + .isContainerAssigned()) { + shouldAddNewAttempt = false; + break; + } } + } else { + // No need to add new attempt if there are in progress attempts + // when speculation is false + shouldAddNewAttempt = false; } } if (shouldAddNewAttempt) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index 288a743ce6c47..d09b3cb1e56b3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -57,7 +57,7 @@ import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 7bcb2ce73163e..875726647b2c7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -57,7 +57,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index a0a4def86343e..9adfb82d6e408 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -87,7 +87,7 @@ import org.apache.hadoop.yarn.util.RackResolver; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java index bb3e1faa468dd..e67acd0e13a43 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java @@ -51,7 +51,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.util.resource.Resources; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java index 211bdc4da90b9..91215b528b359 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java @@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.event.Event; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java index fe3ace8beff49..72f8047dc33d9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java @@ -78,7 +78,7 @@ import org.apache.hadoop.yarn.webapp.BadRequestException; import org.apache.hadoop.yarn.webapp.NotFoundException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import com.google.inject.Inject; @Path("/ws/v1/mapreduce") diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java index 405c5f2615dcd..061de154b8b83 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java @@ -41,7 +41,7 @@ import org.apache.hadoop.yarn.webapp.View; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import com.google.inject.Inject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java index b3cefc61fc167..49b986e225973 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java @@ -51,7 +51,7 @@ public void testFinshingAttemptTimeout() Configuration conf = new Configuration(); conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 100); conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10); - + conf.setDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, 0.01); AppContext appCtx = mock(AppContext.class); JobTokenSecretManager secret = mock(JobTokenSecretManager.class); RMHeartbeatHandler rmHeartbeatHandler = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java index 150ef1fff25a6..f8b8c6ccdf1de 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapred; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.mapred.Counters.Counter; import org.apache.hadoop.mapreduce.checkpoint.EnumCounter; @@ -501,6 +501,8 @@ protected void stopRpcServer() { Configuration conf = new Configuration(); conf.setLong(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 1); + conf.setDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, 0.01); + conf.setDouble(MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS, 1); tal.init(conf); tal.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index c9a7d2d083142..fbeb94a2a16ac 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -1119,6 +1119,7 @@ protected void processDoneFiles(JobId jobId) throws IOException { class JHEventHandlerForSigtermTest extends JobHistoryEventHandler { public JHEventHandlerForSigtermTest(AppContext context, int startCount) { super(context, startCount); + JobHistoryEventHandler.fileMap.clear(); } public void addToFileMap(JobId jobId) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java index 06866337263e1..b8b88b3be1d08 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java @@ -31,7 
+31,7 @@ import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; import org.apache.hadoop.yarn.util.Clock; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class MockAppContext implements AppContext { final ApplicationAttemptId appAttemptID; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index bfb8d79de9391..586d86aee1d93 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -69,9 +69,9 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.util.Records; -import com.google.common.collect.Iterators; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class MockJobs extends MockApps { static final Iterator JOB_STATES = Iterators.cycle(JobState diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java index a2f0abaaa3d8d..44a4760eb6803 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java @@ -271,6 +271,7 @@ public InetSocketAddress getAddress() { protected void serviceInit(Configuration conf) throws Exception { conf.setInt(MRJobConfig.TASK_TIMEOUT, 1*1000);//reduce timeout conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 1*1000); + conf.setDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, 0.01); super.serviceInit(conf); } }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java index dd6f810b7ed44..d2bd0104fff6f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java @@ -26,7 +26,7 @@ import java.util.Iterator; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java index 5af79d6f73b70..1cd625551a620 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java @@ -31,6 +31,7 @@ import java.net.Proxy; import java.net.URI; import java.net.URISyntaxException; +import java.net.URL; import java.nio.channels.ClosedChannelException; import javax.servlet.ServletException; @@ -42,7 +43,9 @@ import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobContext; +import org.apache.hadoop.mapreduce.CustomJobEndNotifier; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; @@ -299,6 +302,45 @@ public void testNotificationOnLastRetryUnregistrationFailure() server.stop(); } + @Test + public void testCustomNotifierClass() throws InterruptedException { + JobConf conf = new JobConf(); + conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, + "http://example.com?jobId=$jobId&jobStatus=$jobStatus"); + conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS, + CustomNotifier.class.getName()); + this.setConf(conf); + + JobReport jobReport = mock(JobReport.class); + JobId jobId = mock(JobId.class); + when(jobId.toString()).thenReturn("mock-Id"); + when(jobReport.getJobId()).thenReturn(jobId); + when(jobReport.getJobState()).thenReturn(JobState.SUCCEEDED); + + CustomNotifier.urlToNotify = null; + this.notify(jobReport); + final URL urlToNotify = CustomNotifier.urlToNotify; + + Assert.assertEquals("http://example.com?jobId=mock-Id&jobStatus=SUCCEEDED", + urlToNotify.toString()); + } + + public static final class CustomNotifier implements CustomJobEndNotifier { + + /** + * Once notifyOnce was invoked we'll store the URL in this variable + * so we can assert on it. 
+ */ + private static URL urlToNotify = null; + + @Override + public boolean notifyOnce(final URL url, final Configuration jobConf) { + urlToNotify = url; + return true; + } + + } + private static HttpServer2 startHttpServer() throws Exception { new File(System.getProperty( "build.webapps", "build/webapps") + "/test").mkdirs(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java index 7e9ec7d4d1565..534bcd0940895 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java @@ -28,7 +28,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index dce69e41a1d79..5a23b58875a0b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -28,7 +28,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -40,6 +40,7 @@ import java.util.concurrent.TimeoutException; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.junit.Assert; @@ -105,6 +106,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; + +import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.slf4j.Logger; @@ -114,15 +117,24 @@ public class TestRecovery { private static final Logger LOG = LoggerFactory.getLogger(TestRecovery.class); - private static Path outputDir = new Path(new File("target", - TestRecovery.class.getName()).getAbsolutePath() + - Path.SEPARATOR + "out"); + + private static File testRootDir; + private static Path outputDir; private static String partFile = "part-r-00000"; private Text key1 = new Text("key1"); private Text key2 = new Text("key2"); private Text val1 = new Text("val1"); private Text val2 = new Text("val2"); + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir( + TestRecovery.class); + outputDir = new Path(testRootDir.getAbsolutePath(), "out"); + } + /** * AM with 2 maps and 1 reduce. 
For 1st map, one attempt fails, one attempt * completely disappears because of failed launch, one attempt gets killed and @@ -600,14 +612,13 @@ public void testRecoveryWithSpillEncryption() throws Exception { MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount) { }; - Configuration conf = new Configuration(); + Configuration conf = + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true); conf.setBoolean("mapred.mapper.new-api", true); conf.setBoolean("mapred.reducer.new-api", true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - // run the MR job at the first attempt Job jobAttempt1 = app.submit(conf); app.waitForState(jobAttempt1, JobState.RUNNING); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java index 418f09e8d3597..f5c30c2a8db54 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java @@ -25,7 +25,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -63,6 +63,7 @@ public void testTaskTimeout() throws InterruptedException { // so that TASK_TIMEOUT is not overridden conf.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 5); conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); //10 ms + conf.setDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, 0.01); hb.init(conf); hb.start(); @@ -205,6 +206,7 @@ public void testTaskUnregistered() throws Exception { new TaskHeartbeatHandler(mockHandler, clock, 1); Configuration conf = new Configuration(); conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 1); + conf.setDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, 0.01); hb.init(conf); hb.start(); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java index 945b2543919c0..43e59a7b345c2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java @@ -39,6 +39,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.JobContext; import 
org.apache.hadoop.mapreduce.JobID; @@ -991,6 +992,28 @@ public void testJobPriorityUpdate() throws Exception { Assert.assertEquals(updatedPriority, jobPriority); } + @Test + public void testCleanupSharedCacheUploadPolicies() { + Configuration config = new Configuration(); + Map archivePolicies = new HashMap<>(); + archivePolicies.put("archive1", true); + archivePolicies.put("archive2", true); + Job.setArchiveSharedCacheUploadPolicies(config, archivePolicies); + Map filePolicies = new HashMap<>(); + filePolicies.put("file1", true); + filePolicies.put("jar1", true); + Job.setFileSharedCacheUploadPolicies(config, filePolicies); + Assert.assertEquals( + 2, Job.getArchiveSharedCacheUploadPolicies(config).size()); + Assert.assertEquals( + 2, Job.getFileSharedCacheUploadPolicies(config).size()); + JobImpl.cleanupSharedCacheUploadPolicies(config); + Assert.assertEquals( + 0, Job.getArchiveSharedCacheUploadPolicies(config).size()); + Assert.assertEquals( + 0, Job.getFileSharedCacheUploadPolicies(config).size()); + } + private static CommitterEventHandler createCommitterEventHandler( Dispatcher dispatcher, OutputCommitter committer) { final SystemClock clock = SystemClock.getInstance(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index a5f0f7b788084..4c03c78bb95b5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -38,11 +38,12 @@ import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.junit.After; import org.junit.Assert; +import org.junit.Before; import org.junit.BeforeClass; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -113,7 +114,7 @@ import org.junit.Test; import org.mockito.ArgumentCaptor; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @SuppressWarnings({"unchecked", "rawtypes"}) public class TestTaskAttempt{ @@ -155,6 +156,11 @@ public static void setupBeforeClass() { ResourceUtils.resetResourceTypes(new Configuration()); } + @Before + public void before() { + TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear(); + } + @After public void tearDown() { ResourceUtils.resetResourceTypes(new Configuration()); @@ -1721,6 +1727,7 @@ public void testReducerMemoryRequestOverriding() { TestAppender testAppender = new TestAppender(); final Logger logger = Logger.getLogger(TaskAttemptImpl.class); try { + TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear(); logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java index 75798876ddaaa..1578ef0aba481 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java @@ -153,7 +153,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.mockito.InOrder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java index d55c9299c18a7..adb6a57367078 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java @@ -69,7 +69,7 @@ import org.junit.Rule; import org.junit.Test; -import com.google.common.net.HttpHeaders; +import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders; import com.google.inject.Injector; import org.junit.contrib.java.lang.system.EnvironmentVariables; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java index c5dc29075d47e..7317baae4a8c1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java @@ -48,7 +48,7 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml index 1b48f399b168e..180123b7514b9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-common - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Common diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java index 1565e2e1b9bd9..8f17ffda422d4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java @@ -55,8 +55,8 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.FSDownload; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java index 0f1d759c46e3e..88a10e2a8d143 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java @@ -70,7 +70,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java index 156930325bcef..5ff80809450b2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; @@ -34,7 +34,7 @@ public HSClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { super(); RPC.setProtocolEngine(conf, HSClientProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = (HSClientProtocolPB)RPC.getProxy( 
HSClientProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java index efd48715283d4..7d8344841b886 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java @@ -24,7 +24,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -108,8 +108,10 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol, public MRClientProtocolPBClientImpl() {}; - public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class); + public MRClientProtocolPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, MRClientProtocolPB.class, + ProtobufRpcEngine2.class); proxy = RPC.getProxy(MRClientProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java index 1b087a760cb1b..41a8458ed3f3f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java @@ -32,7 +32,7 @@ import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java index 8a6e13871139c..acda0f43d9186 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.mapreduce.v2.util; -import com.google.common.base.Joiner; -import 
com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo similarity index 100% rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestJobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestJobHistoryUtils.java index cee401e1792df..447d18aa5589c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestJobHistoryUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestJobHistoryUtils.java @@ -26,7 +26,7 @@ import java.util.List; import java.util.Map; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index 1bbd52058dc02..58dbd1a32ce9a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-core - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Core diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java index 94ad9e0187ef2..5bd26883af0fa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java @@ -42,7 +42,8 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.CryptoUtils; +import 
org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -576,7 +577,7 @@ private Writer createSpillFile() throws IOException { file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(), -1, conf); FSDataOutputStream out = fs.create(file); - out = CryptoUtils.wrapIfNecessary(conf, out); + out = IntermediateEncryptedStream.wrapIfNecessary(conf, out, tmp); return new Writer(conf, out, null, null, null, null, true); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java index 1b2ce1911210b..1d0d04326cd46 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java @@ -45,7 +45,7 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter; import org.slf4j.Logger; -import com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; /** * A set of named counters. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java index b3e2b4ade80fc..c319406bf5819 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java @@ -48,7 +48,7 @@ import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.StringUtils; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java index c0dd6502f0709..db6138fae5a0c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java @@ -23,7 +23,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability; @@ -1886,6 +1886,52 @@ public void setJobEndNotificationURI(String uri) { set(JobContext.MR_JOB_END_NOTIFICATION_URL, uri); } + /** + * Returns the class to be invoked in order to send a notification + * after the job has completed (success/failure). 
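The accessor documented above pairs with a setter further down in this hunk; together they give JobConf a typed way to wire the notifier. A minimal client-side sketch, assuming a hypothetical MyNotifier implementation of org.apache.hadoop.mapreduce.CustomJobEndNotifier (a notification URL is still required, as the Javadoc notes):

  // Sketch only: MyNotifier is a hypothetical CustomJobEndNotifier implementation.
  JobConf conf = new JobConf();
  conf.setJobEndNotificationURI(
      "https://example.com/notify?jobId=$jobId&jobStatus=$jobStatus");
  conf.setJobEndNotificationCustomNotifierClass(MyNotifier.class.getName());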
+ * + * @return the fully-qualified name of the class which implements + * {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier} set through the + * {@link org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS} + * property + * + * @see JobConf#setJobEndNotificationCustomNotifierClass(java.lang.String) + * @see org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS + */ + public String getJobEndNotificationCustomNotifierClass() { + return get(JobContext.MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS); + } + + /** + * Sets the class to be invoked in order to send a notification after the job + * has completed (success/failure). + * + * A notification url still has to be set which will be passed to + * {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier#notifyOnce( + * java.net.URL, org.apache.hadoop.conf.Configuration)} + * along with the Job's conf. + * + * If this is set instead of using a simple HttpURLConnection + * we'll create a new instance of this class + * which should be an implementation of + * {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier}, + * and we'll invoke that. + * + * @param customNotifierClassName the fully-qualified name of the class + * which implements + * {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier} + * + * @see JobConf#setJobEndNotificationURI(java.lang.String) + * @see + * org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS + */ + public void setJobEndNotificationCustomNotifierClass( + String customNotifierClassName) { + + set(JobContext.MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS, + customNotifierClassName); + } + /** * Get job-specific shared directory for use as scratch space * diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java index 81f6140124655..9158ec3b33720 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java @@ -31,7 +31,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * JobQueueClient is interface provided to the user in order to get diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java index a248f1401cb8d..5810698607c41 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.LinkedList; import java.util.List; +import java.util.StringJoiner; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import 
java.util.concurrent.ExecutorService; @@ -37,20 +38,27 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import com.google.common.collect.Iterables; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; + /** * Utility class to fetch block locations for specified Input paths using a * configured number of threads. @@ -59,7 +67,7 @@ * configuration. */ @Private -public class LocatedFileStatusFetcher { +public class LocatedFileStatusFetcher implements IOStatisticsSource { public static final Logger LOG = LoggerFactory.getLogger(LocatedFileStatusFetcher.class.getName()); @@ -86,6 +94,12 @@ public class LocatedFileStatusFetcher { private volatile Throwable unknownError; + /** + * Demand created IO Statistics: only if the filesystem + * returns statistics does this fetch collect them. + */ + private IOStatisticsSnapshot iostats; + /** * Instantiate. * The newApi switch is only used to configure what exception is raised @@ -151,12 +165,13 @@ public Iterable getFileStatuses() throws InterruptedException, } } finally { lock.unlock(); + // either the scan completed or an error was raised. + // in the case of an error shutting down the executor will interrupt all + // active threads, which can add noise to the logs. + LOG.debug("Scan complete: shutting down"); + this.exec.shutdownNow(); } - // either the scan completed or an error was raised. - // in the case of an error shutting down the executor will interrupt all - // active threads, which can add noise to the logs. - LOG.debug("Scan complete: shutting down"); - this.exec.shutdownNow(); + if (this.unknownError != null) { LOG.debug("Scan failed", this.unknownError); if (this.unknownError instanceof Error) { @@ -224,7 +239,46 @@ private void decrementRunningAndCheckCompletion() { lock.unlock(); } } - + + /** + * Return any IOStatistics collected during listing. + * @return IO stats accrued. 
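Because the fetcher now implements IOStatisticsSource, code that drives a LocatedFileStatusFetcher can surface listing statistics once the scan finishes. A rough sketch, assuming a fetcher constructed elsewhere and an SLF4J logger named LOG; statistics are only present when the underlying filesystem listings supplied them:

  // Sketch: "fetcher" is an already-constructed LocatedFileStatusFetcher.
  Iterable<FileStatus> statuses = fetcher.getFileStatuses();
  // Null when no per-listing statistics were collected.
  IOStatistics stats = fetcher.getIOStatistics();
  if (stats != null) {
    LOG.info("Input listing statistics: {}", stats);
  }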
+ */ + @Override + public synchronized IOStatistics getIOStatistics() { + return iostats; + } + + /** + * Add the statistics of an individual thread's scan. + * @param stats possibly null statistics. + */ + private void addResultStatistics(IOStatistics stats) { + if (stats != null) { + // demand creation of IO statistics. + synchronized (this) { + LOG.debug("Adding IOStatistics: {}", stats); + if (iostats == null) { + // demand create the statistics + iostats = snapshotIOStatistics(stats); + } else { + iostats.aggregate(stats); + } + } + } + } + + @Override + public String toString() { + final IOStatistics ioStatistics = getIOStatistics(); + StringJoiner stringJoiner = new StringJoiner(", ", + LocatedFileStatusFetcher.class.getSimpleName() + "[", "]"); + if (ioStatistics != null) { + stringJoiner.add("IOStatistics=" + ioStatistics); + } + return stringJoiner.toString(); + } + /** * Retrieves block locations for the given @link {@link FileStatus}, and adds * additional paths to the process queue if required. @@ -263,6 +317,8 @@ public Result call() throws Exception { } } } + // aggregate any stats + result.stats = retrieveIOStatistics(iter); } else { result.locatedFileStatuses.add(fileStatus); } @@ -273,6 +329,7 @@ private static class Result { private List locatedFileStatuses = new LinkedList<>(); private List dirsNeedingRecursiveCalls = new LinkedList<>(); private FileSystem fs; + private IOStatistics stats; } } @@ -287,6 +344,7 @@ private class ProcessInputDirCallback implements @Override public void onSuccess(ProcessInputDirCallable.Result result) { try { + addResultStatistics(result.stats); if (!result.locatedFileStatuses.isEmpty()) { resultQueue.add(result.locatedFileStatuses); } @@ -401,4 +459,10 @@ public void onFailure(Throwable t) { registerError(t); } } + + @VisibleForTesting + ListeningExecutorService getListeningExecutorService() { + return exec; + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 17461b196b37e..fa4396d77f458 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -63,6 +63,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex; import org.apache.hadoop.mapreduce.task.MapContextImpl; import org.apache.hadoop.mapreduce.CryptoUtils; @@ -1630,7 +1631,9 @@ private void sortAndSpill() throws IOException, ClassNotFoundException, IFile.Writer writer = null; try { long segmentStart = out.getPos(); - partitionOut = CryptoUtils.wrapIfNecessary(job, out, false); + partitionOut = + IntermediateEncryptedStream.wrapIfNecessary(job, out, false, + filename); writer = new Writer(job, partitionOut, keyClass, valClass, codec, spilledRecordsCounter); if (combinerRunner == null) { @@ -1687,6 +1690,7 @@ private void sortAndSpill() throws IOException, ClassNotFoundException, Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(numSpills, 
partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH); + IntermediateEncryptedStream.addSpillIndexFile(indexFilename, job); spillRec.writeToFile(indexFilename, job); } else { indexCacheList.add(spillRec); @@ -1727,7 +1731,9 @@ private void spillSingleRecord(final K key, final V value, try { long segmentStart = out.getPos(); // Create a new codec, don't care! - partitionOut = CryptoUtils.wrapIfNecessary(job, out, false); + partitionOut = + IntermediateEncryptedStream.wrapIfNecessary(job, out, false, + filename); writer = new IFile.Writer(job, partitionOut, keyClass, valClass, codec, spilledRecordsCounter); @@ -1761,6 +1767,7 @@ private void spillSingleRecord(final K key, final V value, Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH); + IntermediateEncryptedStream.addSpillIndexFile(indexFilename, job); spillRec.writeToFile(indexFilename, job); } else { indexCacheList.add(spillRec); @@ -1854,15 +1861,19 @@ private void mergeParts() throws IOException, InterruptedException, finalOutFileSize += rfs.getFileStatus(filename[i]).getLen(); } if (numSpills == 1) { //the spill is the final output + Path indexFileOutput = + mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]); sameVolRename(filename[0], mapOutputFile.getOutputFileForWriteInVolume(filename[0])); if (indexCacheList.size() == 0) { - sameVolRename(mapOutputFile.getSpillIndexFile(0), - mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0])); + Path indexFilePath = mapOutputFile.getSpillIndexFile(0); + IntermediateEncryptedStream.validateSpillIndexFile( + indexFilePath, job); + sameVolRename(indexFilePath, indexFileOutput); } else { - indexCacheList.get(0).writeToFile( - mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job); + indexCacheList.get(0).writeToFile(indexFileOutput, job); } + IntermediateEncryptedStream.addSpillIndexFile(indexFileOutput, job); sortPhase.complete(); return; } @@ -1870,6 +1881,7 @@ private void mergeParts() throws IOException, InterruptedException, // read in paged indices for (int i = indexCacheList.size(); i < numSpills; ++i) { Path indexFileName = mapOutputFile.getSpillIndexFile(i); + IntermediateEncryptedStream.validateSpillIndexFile(indexFileName, job); indexCacheList.add(new SpillRecord(indexFileName, job)); } @@ -1881,7 +1893,7 @@ private void mergeParts() throws IOException, InterruptedException, mapOutputFile.getOutputFileForWrite(finalOutFileSize); Path finalIndexFile = mapOutputFile.getOutputIndexFileForWrite(finalIndexFileSize); - + IntermediateEncryptedStream.addSpillIndexFile(finalIndexFile, job); //The output stream for the final single output file FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096); FSDataOutputStream finalPartitionOut = null; @@ -1893,8 +1905,9 @@ private void mergeParts() throws IOException, InterruptedException, try { for (int i = 0; i < partitions; i++) { long segmentStart = finalOut.getPos(); - finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut, - false); + finalPartitionOut = + IntermediateEncryptedStream.wrapIfNecessary(job, finalOut, + false, finalOutputFile); Writer writer = new Writer(job, finalPartitionOut, keyClass, valClass, codec, null); writer.close(); @@ -1957,7 +1970,8 @@ private void mergeParts() throws IOException, InterruptedException, //write merged output to disk long segmentStart = finalOut.getPos(); - finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut, false); + finalPartitionOut = IntermediateEncryptedStream.wrapIfNecessary(job, + 
finalOut, false, finalOutputFile); Writer writer = new Writer(job, finalPartitionOut, keyClass, valClass, codec, spilledRecordsCounter); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java index 16f88370bb6e3..d783752cf717d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.util.PriorityQueue; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; @@ -302,7 +303,7 @@ void init(Counters.Counter readsCounter) throws IOException { FSDataInputStream in = fs.open(file); in.seek(segmentOffset); - in = CryptoUtils.wrapIfNecessary(conf, in); + in = IntermediateEncryptedStream.wrapIfNecessary(conf, in, file); reader = new Reader(conf, in, segmentLength - CryptoUtils.cryptoPadding(conf), codec, readsCounter); @@ -730,7 +731,8 @@ RawKeyValueIterator merge(Class keyClass, Class valueClass, approxOutputSize, conf); FSDataOutputStream out = fs.create(outputFile); - out = CryptoUtils.wrapIfNecessary(conf, out); + out = IntermediateEncryptedStream.wrapIfNecessary(conf, out, + outputFile); Writer writer = new Writer(conf, out, keyClass, valueClass, codec, writesCounter, true); writeFile(this, writer, reporter, conf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index 9b62afc34169e..8ada00377f5b7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ -34,7 +34,7 @@ import javax.crypto.SecretKey; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java index 54e1e67562a48..f5e07e9128a1c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java @@ -56,7 +56,7 @@ import org.apache.log4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * A simple logger to handle the task-specific user logs. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java index 5c871f0f46b26..45b4fd6f717ba 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java @@ -27,7 +27,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.compress.*; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * An {@link InputFormat} for plain text files. Files are broken into lines. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java index 90ce57aa36cc8..125c7568c289a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java @@ -175,7 +175,7 @@ protected V generateActualValue(K key, V value) { /** - * Generate the outfile name based on a given anme and the input file name. If + * Generate the outfile name based on a given name and the input file name. If * the {@link JobContext#MAP_INPUT_FILE} does not exists (i.e. this is not for a map only job), * the given name is returned unchanged. If the config value for * "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given @@ -187,7 +187,7 @@ protected V generateActualValue(K key, V value) { * the job config * @param name * the output file name - * @return the outfile name based on a given anme and the input file name. + * @return the outfile name based on a given name and the input file name. 
*/ protected String getInputFileBasedOutputFileName(JobConf job, String name) { String infilepath = job.get(MRJobConfig.MAP_INPUT_FILE); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java index bed423a976a35..85830a95e2b8c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java @@ -27,7 +27,7 @@ import java.util.ServiceConfigurationError; import java.util.ServiceLoader; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CustomJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CustomJobEndNotifier.java new file mode 100644 index 0000000000000..e5a6b9f1dff36 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CustomJobEndNotifier.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce; + +import org.apache.hadoop.conf.Configuration; + +import java.net.URL; + +/** + * An interface for implementing a custom Job end notifier. The built-in + * Job end notifier uses a simple HTTP connection to notify the Job end status. + * By implementing this interface and setting the + * {@link MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS} property + * in the map-reduce Job configuration you can have your own + * notification mechanism. For now this still only works with HTTP/HTTPS URLs, + * but by implementing this class you can choose how you want to make the + * notification itself. For example you can choose to use a custom + * HTTP library, or do a delegation token authentication, maybe set a + * custom SSL context on the connection, etc. This means you still have to set + * the {@link MRJobConfig#MR_JOB_END_NOTIFICATION_URL} property + * in the Job's conf. 
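A bare-bones implementation of the interface described above might look like the following sketch; the class name and the plain HttpURLConnection handling are illustrative, not part of this patch, and retry behaviour stays with the framework as noted in the notifyOnce Javadoc below.

  import java.net.HttpURLConnection;
  import java.net.URL;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.CustomJobEndNotifier;

  /** Illustrative only: treats any 2xx response as a successful notification. */
  public class SimpleHttpJobEndNotifier implements CustomJobEndNotifier {
    @Override
    public boolean notifyOnce(URL url, Configuration jobConf) throws Exception {
      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
      try {
        int status = connection.getResponseCode();
        return status >= 200 && status < 300;
      } finally {
        connection.disconnect();
      }
    }
  }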
+ */ +public interface CustomJobEndNotifier { + + /** + * The implementation should try to do a Job end notification only once. + * + * See {@link MRJobConfig#MR_JOB_END_RETRY_ATTEMPTS}, + * {@link MRJobConfig#MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS} + * and org.apache.hadoop.mapreduce.v2.app.JobEndNotifier on how exactly + * this method will be invoked. + * + * @param url the URL which needs to be notified + * (see {@link MRJobConfig#MR_JOB_END_NOTIFICATION_URL}) + * @param jobConf the map-reduce Job's configuration + * + * @return true if the notification was successful + */ + boolean notifyOnce(URL url, Configuration jobConf) throws Exception; + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java index 31e2057e8dfbf..c9fe2c377f009 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ReservationId; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1450,26 +1450,21 @@ public static void setArchiveSharedCacheUploadPolicies(Configuration conf, */ private static void setSharedCacheUploadPolicies(Configuration conf, Map policies, boolean areFiles) { - if (policies != null) { - StringBuilder sb = new StringBuilder(); - Iterator> it = policies.entrySet().iterator(); - Map.Entry e; - if (it.hasNext()) { - e = it.next(); - sb.append(e.getKey() + DELIM + e.getValue()); - } else { - // policies is an empty map, just skip setting the parameter - return; - } - while (it.hasNext()) { - e = it.next(); - sb.append("," + e.getKey() + DELIM + e.getValue()); - } - String confParam = - areFiles ? MRJobConfig.CACHE_FILES_SHARED_CACHE_UPLOAD_POLICIES - : MRJobConfig.CACHE_ARCHIVES_SHARED_CACHE_UPLOAD_POLICIES; - conf.set(confParam, sb.toString()); + String confParam = areFiles ? + MRJobConfig.CACHE_FILES_SHARED_CACHE_UPLOAD_POLICIES : + MRJobConfig.CACHE_ARCHIVES_SHARED_CACHE_UPLOAD_POLICIES; + // If no policy is provided, we will reset the config by setting an empty + // string value. In other words, cleaning up existing policies. This is + // useful when we try to clean up shared cache upload policies for + // non-application master tasks. See MAPREDUCE-7294 for details. 
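Editor's note: a hedged usage sketch of the reset behaviour that the comment above introduces (the hunk continues below). The policy map contents here are illustrative; the method shown is the public setter visible in this hunk's context.

    // With MAPREDUCE-7294, passing a null or empty policy map now clears any
    // previously stored value instead of leaving stale policies in the config.
    Configuration conf = new Configuration();
    Map<String, Boolean> policies = new HashMap<>();            // empty: no upload policies
    Job.setArchiveSharedCacheUploadPolicies(conf, policies);    // stored value is reset to ""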
+ if (policies == null || policies.size() == 0) { + conf.set(confParam, ""); + return; } + StringBuilder sb = new StringBuilder(); + policies.forEach((k,v) -> sb.append(k).append(DELIM).append(v).append(",")); + sb.deleteCharAt(sb.length() - 1); + conf.set(confParam, sb.toString()); } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java index c8686d7162e14..a7cdf34d2ac47 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java @@ -48,7 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is responsible for uploading resources from the client to HDFS diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java index 303aa138d9dc2..f6e66db23697e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java @@ -21,7 +21,7 @@ import java.io.FileNotFoundException; import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index ebf9341048fb2..dd6c92204640c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -63,7 +63,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.api.records.ReservationId; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; @InterfaceAudience.Private @InterfaceStability.Unstable diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index ff924130358d9..a90c58dd28b4c 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -372,6 +372,29 @@ public interface MRJobConfig { public static final int TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS_DEFAULT = 20 * 1000; + /** + * TaskAttemptListenerImpl will log the task progress when the delta progress + * is larger than or equal the defined value. + * The double value has to be between 0, and 1 with two decimals. + */ + String TASK_LOG_PROGRESS_DELTA_THRESHOLD = + "mapreduce.task.log.progress.delta.threshold"; + /** + * Default delta progress is set to 5%. + */ + double TASK_LOG_PROGRESS_DELTA_THRESHOLD_DEFAULT = 0.05; + /** + * TaskAttemptListenerImpl will log the task progress when the defined value + * in seconds expires. + * This helps to debug task attempts that are doing very slow progress. + */ + String TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS = + "mapreduce.task.log.progress.wait.interval-seconds"; + /** + * Default period to log the task attempt progress is 60 seconds. + */ + long TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS_DEFAULT = 60L; + public static final String TASK_ID = "mapreduce.task.id"; public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir"; @@ -1082,6 +1105,9 @@ public interface MRJobConfig { public static final String MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL = "mapreduce.job.end-notification.max.retry.interval"; + String MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS = + "mapreduce.job.end-notification.custom-notifier-class"; + public static final int DEFAULT_MR_JOB_END_NOTIFICATION_TIMEOUT = 5000; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java index dc026cad5baa8..8a15077672d46 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapreduce.counters; -import com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.mapreduce.Counter; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java index d0339b02ff628..bf2d2617d76c7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounterGroup.java @@ -32,7 +32,7 @@ import org.apache.hadoop.mapreduce.util.ResourceBundles; import org.apache.hadoop.util.StringInterner; -import 
com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; /** * An abstract class to provide common implementation of the diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java index a1c0f68c50715..ad70572327166 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java @@ -42,9 +42,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Iterables; -import com.google.common.collect.Iterators; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * An abstract class to provide common implementation for the Counters diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java index 7f392f2cb5db4..278d0a73a0cbd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/CounterGroupFactory.java @@ -21,8 +21,8 @@ import java.util.List; import java.util.Map; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.mapreduce.Counter; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java index ed7f271bcd7b7..97d19a668dec0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java @@ -27,11 +27,11 @@ import java.util.Iterator; import java.util.Map; -import com.google.common.base.Joiner; -import static com.google.common.base.Preconditions.*; -import com.google.common.collect.AbstractIterator; -import com.google.common.collect.Iterators; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import static 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import org.apache.hadoop.thirdparty.com.google.common.collect.AbstractIterator; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java index 5324223e013bf..ca7f1f06514d8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapreduce.counters; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.io.DataInput; import java.io.DataOutput; @@ -34,8 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.AbstractIterator; -import com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.AbstractIterator; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; /** * An abstract class to provide common implementation for the framework diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java index 02ec53e559c0f..b6643eb240b77 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Event Writer is an utility class used to write events to the underlying diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java index da31591a26d42..4c43c7e0c2613 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java @@ -29,7 +29,7 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * Event to record Failed and Killed completion of jobs diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java index b16e12729247a..6f3cf6f038065 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java @@ -52,9 +52,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.HashMultiset; -import com.google.common.collect.Multiset; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultiset; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multiset; /** * An abstract {@link InputFormat} that returns {@link CombineFileSplit}'s in diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java index 22efe1471f91f..2d88d13a15462 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java @@ -47,7 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * A base class for file-based {@link InputFormat}s. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java index b55d4e442ea80..d15ee7ca4c3fb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java @@ -32,7 +32,7 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** An {@link InputFormat} for plain text files. Files are broken into lines. * Either linefeed or carriage-return are used to signal end of line. 
Keys are diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java index 7f1ea6175ed6e..2973fb05f500b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java @@ -36,8 +36,9 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskAttemptID; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -454,43 +455,44 @@ protected void commitJobInternal(JobContext context) throws IOException { */ private void mergePaths(FileSystem fs, final FileStatus from, final Path to, JobContext context) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Merging data from " + from + " to " + to); - } - reportProgress(context); - FileStatus toStat; - try { - toStat = fs.getFileStatus(to); - } catch (FileNotFoundException fnfe) { - toStat = null; - } - - if (from.isFile()) { - if (toStat != null) { - if (!fs.delete(to, true)) { - throw new IOException("Failed to delete " + to); - } + try (DurationInfo d = new DurationInfo(LOG, + false, + "Merging data from %s to %s", from, to)) { + reportProgress(context); + FileStatus toStat; + try { + toStat = fs.getFileStatus(to); + } catch (FileNotFoundException fnfe) { + toStat = null; } - if (!fs.rename(from.getPath(), to)) { - throw new IOException("Failed to rename " + from + " to " + to); - } - } else if (from.isDirectory()) { - if (toStat != null) { - if (!toStat.isDirectory()) { + if (from.isFile()) { + if (toStat != null) { if (!fs.delete(to, true)) { throw new IOException("Failed to delete " + to); } - renameOrMerge(fs, from, to, context); - } else { - //It is a directory so merge everything in the directories - for (FileStatus subFrom : fs.listStatus(from.getPath())) { - Path subTo = new Path(to, subFrom.getPath().getName()); - mergePaths(fs, subFrom, subTo, context); + } + + if (!fs.rename(from.getPath(), to)) { + throw new IOException("Failed to rename " + from + " to " + to); + } + } else if (from.isDirectory()) { + if (toStat != null) { + if (!toStat.isDirectory()) { + if (!fs.delete(to, true)) { + throw new IOException("Failed to delete " + to); + } + renameOrMerge(fs, from, to, context); + } else { + //It is a directory so merge everything in the directories + for (FileStatus subFrom : fs.listStatus(from.getPath())) { + Path subTo = new Path(to, subFrom.getPath().getName()); + mergePaths(fs, subFrom, subTo, context); + } } + } else { + renameOrMerge(fs, from, to, context); } - } else { - renameOrMerge(fs, from, to, context); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java index bbda26ac79c38..15ff3c67c2fb5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.text.NumberFormat; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java index b7378afdace3d..ddcff646e04fd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java @@ -22,7 +22,7 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java index c4e397d36c3ca..d440c021f2885 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PartialFileOutputCommitter.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** An {@link OutputCommitter} that commits files specified * in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}. 
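Editor's note: the mergePaths() rewrite a few hunks above wraps the merge in org.apache.hadoop.util.DurationInfo (an import this patch adds) so the elapsed time of each merge is logged. A minimal sketch of that try-with-resources pattern; LOG is an org.slf4j.Logger and from, to and doMerge() are placeholders, not the committer's real fields.

    // Time a block of work and log its duration when the block closes.
    try (DurationInfo d = new DurationInfo(LOG, false,
        "Merging data from %s to %s", from, to)) {
      doMerge(from, to);   // hypothetical work being timed
    }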
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java index 5e25f50fefa4c..e463632fa56ae 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java @@ -20,7 +20,7 @@ import java.io.IOException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java new file mode 100644 index 0000000000000..eb14a208c9961 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/IntermediateEncryptedStream.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.security; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.CryptoUtils; + +/** + * Used to wrap helpers while spilling intermediate files. + * Setting the {@link SpillCallBackInjector} helps in: + * 1- adding callbacks to capture the path of the spilled files. + * 2- Verifying the encryption when intermediate encryption is enabled. 
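Editor's note: a hedged, test-style sketch of the injector hook described above. The classes and methods are the ones added by this patch; the harness around them and the Set element type are assumptions.

    import java.util.Set;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream;
    import org.apache.hadoop.mapreduce.security.SpillCallBackPathsFinder;

    public class SpillTrackingExample {
      public static void withSpillTracking(Runnable jobBody) {
        // Install a path-capturing injector before the job runs.
        SpillCallBackPathsFinder finder = new SpillCallBackPathsFinder();
        IntermediateEncryptedStream.setSpillCBInjector(finder);
        try {
          jobBody.run();  // run a job that spills intermediate data
          Set<Path> encrypted = finder.getEncryptedSpilledFiles();
          System.out.println("Encrypted spill files: " + encrypted);
          System.out.println(finder.getSpilledFileReport());
        } finally {
          // Restore whatever injector was active before (no-op in production).
          IntermediateEncryptedStream.resetSpillCBInjector();
        }
      }
    }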
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class IntermediateEncryptedStream { + + private static SpillCallBackInjector prevSpillCBInjector = null; + + public static FSDataOutputStream wrapIfNecessary(Configuration conf, + FSDataOutputStream out, Path outPath) throws IOException { + SpillCallBackInjector.get().writeSpillFileCB(outPath, out, conf); + return CryptoUtils.wrapIfNecessary(conf, out, true); + } + + public static FSDataOutputStream wrapIfNecessary(Configuration conf, + FSDataOutputStream out, boolean closeOutputStream, + Path outPath) throws IOException { + SpillCallBackInjector.get().writeSpillFileCB(outPath, out, conf); + return CryptoUtils.wrapIfNecessary(conf, out, closeOutputStream); + } + + public static FSDataInputStream wrapIfNecessary(Configuration conf, + FSDataInputStream in, Path inputPath) throws IOException { + SpillCallBackInjector.get().getSpillFileCB(inputPath, in, conf); + return CryptoUtils.wrapIfNecessary(conf, in); + } + + public static InputStream wrapIfNecessary(Configuration conf, + InputStream in, long length, Path inputPath) throws IOException { + SpillCallBackInjector.get().getSpillFileCB(inputPath, in, conf); + return CryptoUtils.wrapIfNecessary(conf, in, length); + } + + public static void addSpillIndexFile(Path indexFilename, Configuration conf) { + SpillCallBackInjector.get().addSpillIndexFileCB(indexFilename, conf); + } + + public static void validateSpillIndexFile(Path indexFilename, + Configuration conf) { + SpillCallBackInjector.get().validateSpillIndexFileCB(indexFilename, conf); + } + + public static SpillCallBackInjector resetSpillCBInjector() { + return setSpillCBInjector(prevSpillCBInjector); + } + + public synchronized static SpillCallBackInjector setSpillCBInjector( + SpillCallBackInjector spillInjector) { + prevSpillCBInjector = + SpillCallBackInjector.getAndSet(spillInjector); + return spillInjector; + } + + private IntermediateEncryptedStream() {} +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java index b083156082666..cdd656ca48907 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java new file mode 100644 index 0000000000000..9b23c518f1a3b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackInjector.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.security; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +/** + * Used for injecting callbacks while spilling files. + * Calls into this are a no-op in production code. + */ +@VisibleForTesting +@InterfaceAudience.Private +public class SpillCallBackInjector { + private static SpillCallBackInjector instance = new SpillCallBackInjector(); + public static SpillCallBackInjector get() { + return instance; + } + /** + * Sets the global SpillFilesCBInjector to the new value, returning the old + * value. + * + * @param spillInjector the new implementation for the spill injector. + * @return the previous implementation. + */ + public static SpillCallBackInjector getAndSet( + SpillCallBackInjector spillInjector) { + SpillCallBackInjector prev = instance; + instance = spillInjector; + return prev; + } + + public void writeSpillIndexFileCB(Path path) { + // do nothing + } + + public void writeSpillFileCB(Path path, FSDataOutputStream out, + Configuration conf) { + // do nothing + } + + public void getSpillFileCB(Path path, InputStream is, Configuration conf) { + // do nothing + } + + public String getSpilledFileReport() { + return null; + } + + public void handleErrorInSpillFill(Path path, Exception e) { + // do nothing + } + + public void corruptSpilledFile(Path fileName) throws IOException { + // do nothing + } + + public void addSpillIndexFileCB(Path path, Configuration conf) { + // do nothing + } + + public void validateSpillIndexFileCB(Path path, Configuration conf) { + // do nothing + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java new file mode 100644 index 0000000000000..7be99e556e597 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SpillCallBackPathsFinder.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.security; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoStreamUtils; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.CryptoUtils; + +/** + * An implementation class that keeps track of the spilled files. + */ +public class SpillCallBackPathsFinder extends SpillCallBackInjector { + private static final Logger LOG = + LoggerFactory.getLogger(SpillCallBackPathsFinder.class); + /** + * Encrypted spilled files. + */ + private final Map> encryptedSpillFiles = + Collections.synchronizedMap(new ConcurrentHashMap<>()); + /** + * Non-Encrypted spilled files. + */ + private final Map> spillFiles = + Collections.synchronizedMap(new ConcurrentHashMap<>()); + /** + * Invalid position access. + */ + private final Map> invalidAccessMap = + Collections.synchronizedMap(new ConcurrentHashMap<>()); + /** + * Index spill files. + */ + private final Set indexSpillFiles = ConcurrentHashMap.newKeySet(); + /** + * Paths that were not found in the maps. + */ + private final Set negativeCache = ConcurrentHashMap.newKeySet(); + + protected Map> getFilesMap(Configuration config) { + if (CryptoUtils.isEncryptedSpillEnabled(config)) { + return encryptedSpillFiles; + } + return spillFiles; + } + + @Override + public void writeSpillFileCB(Path path, FSDataOutputStream out, + Configuration conf) { + long outPos = out.getPos(); + getFilesMap(conf) + .computeIfAbsent(path, p -> ConcurrentHashMap.newKeySet()) + .add(outPos); + LOG.debug("writeSpillFileCB.. path:{}; pos:{}", path, outPos); + } + + @Override + public void getSpillFileCB(Path path, InputStream is, Configuration conf) { + if (path == null) { + return; + } + Set pathEntries = getFilesMap(conf).get(path); + if (pathEntries != null) { + try { + long isPos = CryptoStreamUtils.getInputStreamOffset(is); + if (pathEntries.contains(isPos)) { + LOG.debug("getSpillFileCB... Path {}; Pos: {}", path, isPos); + return; + } + invalidAccessMap + .computeIfAbsent(path, p -> ConcurrentHashMap.newKeySet()) + .add(isPos); + LOG.debug("getSpillFileCB... access incorrect position.. " + + "Path {}; Pos: {}", path, isPos); + } catch (IOException e) { + LOG.error("Could not get inputStream position.. Path {}", path, e); + // do nothing + } + return; + } + negativeCache.add(path); + LOG.warn("getSpillFileCB.. Could not find spilled file .. 
Path: {}", path); + } + + @Override + public String getSpilledFileReport() { + StringBuilder strBuilder = + new StringBuilder("\n++++++++ Spill Report ++++++++") + .append(dumpMapEntries("Encrypted Spilled Files", + encryptedSpillFiles)) + .append(dumpMapEntries("Non-Encrypted Spilled Files", + spillFiles)) + .append(dumpMapEntries("Invalid Spill Access", + invalidAccessMap)) + .append("\n ----- Spilled Index Files ----- ") + .append(indexSpillFiles.size()); + for (Path p : indexSpillFiles) { + strBuilder.append("\n\t index-path: ").append(p.toString()); + } + strBuilder.append("\n ----- Negative Cache files ----- ") + .append(negativeCache.size()); + for (Path p : negativeCache) { + strBuilder.append("\n\t path: ").append(p.toString()); + } + return strBuilder.toString(); + } + + @Override + public void addSpillIndexFileCB(Path path, Configuration conf) { + if (path == null) { + return; + } + indexSpillFiles.add(path); + LOG.debug("addSpillIndexFileCB... Path: {}", path); + } + + @Override + public void validateSpillIndexFileCB(Path path, Configuration conf) { + if (path == null) { + return; + } + if (indexSpillFiles.contains(path)) { + LOG.debug("validateSpillIndexFileCB.. Path: {}", path); + return; + } + LOG.warn("validateSpillIndexFileCB.. could not retrieve indexFile.. " + + "Path: {}", path); + negativeCache.add(path); + } + + public Set getEncryptedSpilledFiles() { + return Collections.unmodifiableSet(encryptedSpillFiles.keySet()); + } + + /** + * Gets the set of path:pos of the entries that were accessed incorrectly. + * @return a set of string in the format of {@literal Path[Pos]} + */ + public Set getInvalidSpillEntries() { + Set result = new LinkedHashSet<>(); + for (Entry> spillMapEntry: invalidAccessMap.entrySet()) { + for (Long singleEntry : spillMapEntry.getValue()) { + result.add(String.format("%s[%d]", + spillMapEntry.getKey(), singleEntry)); + } + } + return result; + } + + private String dumpMapEntries(String label, + Map> entriesMap) { + StringBuilder strBuilder = + new StringBuilder(String.format("%n ----- %s ----- %d", label, + entriesMap.size())); + for (Entry> encryptedSpillEntry + : entriesMap.entrySet()) { + strBuilder.append(String.format("%n\t\tpath: %s", + encryptedSpillEntry.getKey())); + for (Long singlePos : encryptedSpillEntry.getValue()) { + strBuilder.append(String.format("%n\t\t\tentry: %d", singlePos)); + } + } + return strBuilder.toString(); + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java new file mode 100644 index 0000000000000..451e6f6550381 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/package-info.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +/** + * Helper classes for the shuffle/spill encryptions. + */ +package org.apache.hadoop.mapreduce.security; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 5b316c1c2d546..d8bc68c6c71d5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.security.SecureShuffleUtils; import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; @@ -50,7 +51,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; class Fetcher extends Thread { @@ -512,7 +513,9 @@ private TaskAttemptID[] copyMapOutput(MapHost host, } InputStream is = input; - is = CryptoUtils.wrapIfNecessary(jobConf, is, compressedLength); + is = + IntermediateEncryptedStream.wrapIfNecessary(jobConf, is, + compressedLength, null); compressedLength -= CryptoUtils.cryptoPadding(jobConf); decompressedLength -= CryptoUtils.cryptoPadding(jobConf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index 90160cfa07c27..dc563eeab4d0f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -36,6 +36,8 @@ import org.apache.hadoop.mapred.SpillRecord; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -151,8 +153,10 @@ private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException { FileSystem localFs = FileSystem.getLocal(job).getRaw(); FSDataInputStream inStream = 
localFs.open(mapOutputFileName); try { - inStream = CryptoUtils.wrapIfNecessary(job, inStream); - inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job)); + inStream.seek(ir.startOffset); + inStream = + IntermediateEncryptedStream.wrapIfNecessary(job, inStream, + mapOutputFileName); mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter); } finally { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java index 58c2686b2799d..29724de0eb923 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java @@ -53,14 +53,14 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @SuppressWarnings(value={"unchecked"}) @InterfaceAudience.LimitedPrivate({"MapReduce"}) @@ -468,7 +468,9 @@ public void merge(List> inputs) throws IOException { mergeOutputSize).suffix( Task.MERGED_OUTPUT_PREFIX); - FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath)); + FSDataOutputStream out = + IntermediateEncryptedStream.wrapIfNecessary(jobConf, + rfs.create(outputPath), outputPath); Writer writer = new Writer(jobConf, out, (Class) jobConf.getMapOutputKeyClass(), (Class) jobConf.getMapOutputValueClass(), codec, null, true); @@ -552,7 +554,9 @@ public void merge(List inputs) throws IOException { localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(), approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX); - FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath)); + FSDataOutputStream out = + IntermediateEncryptedStream.wrapIfNecessary(jobConf, + rfs.create(outputPath), outputPath); Writer writer = new Writer(jobConf, out, (Class) jobConf.getMapOutputKeyClass(), (Class) jobConf.getMapOutputValueClass(), codec, null, true); @@ -735,7 +739,9 @@ private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs, tmpDir, comparator, reporter, spilledRecordsCounter, null, mergePhase); - FSDataOutputStream out = CryptoUtils.wrapIfNecessary(job, fs.create(outputPath)); + FSDataOutputStream out = + IntermediateEncryptedStream.wrapIfNecessary(job, + fs.create(outputPath), outputPath); Writer writer = new Writer(job, out, keyClass, valueClass, codec, null, true); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java index 98d7a57181b73..54a9522e2ccfe 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java @@ -37,10 +37,10 @@ import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.CryptoUtils; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private @InterfaceStability.Unstable @@ -84,7 +84,8 @@ public OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId, this.fs = fs; this.outputPath = outputPath; tmpOutputPath = getTempPath(outputPath, fetcher); - disk = CryptoUtils.wrapIfNecessary(conf, fs.create(tmpOutputPath)); + disk = IntermediateEncryptedStream.wrapIfNecessary(conf, + fs.create(tmpOutputPath), tmpOutputPath); } @VisibleForTesting diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java index d5e97aabc2fdb..4e334958c719f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapreduce.task.reduce; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java index 3dc7cce34ea64..9b5b2f3665d87 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java @@ -35,7 +35,7 @@ import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.IntWritable; diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java index 41081469bf8c0..82e73463c4cc3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java @@ -30,7 +30,7 @@ import java.util.HashSet; import java.util.Arrays; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -64,7 +64,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Interprets the map reduce cli options diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java index ac16c127c29f8..9e32e70ede6a4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java @@ -21,7 +21,7 @@ import java.text.ParseException; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IntWritable; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java index afedef3785f9f..4319e17416831 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/MRJobConfUtil.java @@ -17,13 +17,22 @@ */ package org.apache.hadoop.mapreduce.util; +import java.io.File; +import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.MRJobConfig; /** * A class that contains utility methods for MR Job configuration. 
*/ public final class MRJobConfUtil { + private static final Logger LOG = + LoggerFactory.getLogger(MRJobConfUtil.class); public static final String REDACTION_REPLACEMENT_VAL = "*********(redacted)"; /** @@ -58,4 +67,125 @@ public static long getTaskProgressReportInterval(final Configuration conf) { } public static final float TASK_REPORT_INTERVAL_TO_TIMEOUT_RATIO = 0.01f; + + /** + * Configuration values that control how often a task attempt's progress is logged. + */ + public static final double PROGRESS_MIN_DELTA_FACTOR = 100.0; + private static volatile Double progressMinDeltaThreshold = null; + private static volatile Long progressMaxWaitDeltaTimeThreshold = null; + + /** + * Loads the values defined in the configuration, including the minimum + * progress delta and the maximum wait time between log messages. + * @param conf the job configuration. + */ + public static void setTaskLogProgressDeltaThresholds( + final Configuration conf) { + if (progressMinDeltaThreshold == null) { + progressMinDeltaThreshold = + new Double(PROGRESS_MIN_DELTA_FACTOR + * conf.getDouble(MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD, + MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD_DEFAULT)); + } + + if (progressMaxWaitDeltaTimeThreshold == null) { + progressMaxWaitDeltaTimeThreshold = + TimeUnit.SECONDS.toMillis(conf + .getLong( + MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS, + MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS_DEFAULT)); + } + } + + /** + * Retrieves the minimum progress delta required to log the task attempt's + * current progress. + * @return the threshold defined in the conf, or the default value if + * {@link #setTaskLogProgressDeltaThresholds} has not been called. + */ + public static double getTaskProgressMinDeltaThreshold() { + if (progressMinDeltaThreshold == null) { + return PROGRESS_MIN_DELTA_FACTOR + * MRJobConfig.TASK_LOG_PROGRESS_DELTA_THRESHOLD_DEFAULT; + } + return progressMinDeltaThreshold.doubleValue(); + } + + /** + * Retrieves the minimum wait time required to log the task attempt's + * current progress. + * @return the threshold defined in the conf, or the default value if + * {@link #setTaskLogProgressDeltaThresholds} has not been called. + */ + public static long getTaskProgressWaitDeltaTimeThreshold() { + if (progressMaxWaitDeltaTimeThreshold == null) { + return TimeUnit.SECONDS.toMillis( + MRJobConfig.TASK_LOG_PROGRESS_WAIT_INTERVAL_SECONDS_DEFAULT); + } + return progressMaxWaitDeltaTimeThreshold.longValue(); + } + + /** + * Converts a progress value between 0.0 and 1.0 to the double format used to + * log the task attempt. + * @param progress the progress of the task, a value between 0.0 and 1.0. + * @return the double value that is less than or equal to the argument + * multiplied by {@link #PROGRESS_MIN_DELTA_FACTOR}. + */ + public static double convertTaskProgressToFactor(final float progress) { + return Math.floor(progress * MRJobConfUtil.PROGRESS_MIN_DELTA_FACTOR); + } + + /** + * For unit tests, use urandom to prevent the YarnChild process from hanging + * on low-entropy systems. + */ + private static final String TEST_JVM_SECURITY_EGD_OPT = + "-Djava.security.egd=file:/dev/./urandom"; + + public static Configuration initEncryptedIntermediateConfigsForTesting( + Configuration conf) { + Configuration config = + (conf == null) ? new Configuration(): conf; + final String childJVMOpts = + TEST_JVM_SECURITY_EGD_OPT.concat(" ") + .concat(config.get("mapred.child.java.opts", " ")); + // Set the JVM arguments.
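The MRJobConfUtil helpers above throttle how often a task attempt logs its progress: convertTaskProgressToFactor() maps progress onto a 0-100 scale, and a new log message is warranted once either the progress-delta threshold or the wait-time threshold is exceeded. The sketch below shows that intended contract; the ProgressLogThrottle class, its fields, and the timestamps passed in are hypothetical, and only the MRJobConfUtil methods come from this patch.

```java
// Hypothetical caller that throttles progress logging with the MRJobConfUtil
// helpers introduced above. Only the MRJobConfUtil calls are from the patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.util.MRJobConfUtil;

public class ProgressLogThrottle {
  private double lastLoggedFactor = -1.0;
  private long lastLogTimeMs = 0L;

  public ProgressLogThrottle(Configuration conf) {
    // Cache the configured thresholds; subsequent calls are no-ops.
    MRJobConfUtil.setTaskLogProgressDeltaThresholds(conf);
  }

  /** Returns true when the new progress value is worth logging. */
  public boolean shouldLog(float progress, long nowMs) {
    double factor = MRJobConfUtil.convertTaskProgressToFactor(progress);
    boolean bigEnoughDelta = factor - lastLoggedFactor
        >= MRJobConfUtil.getTaskProgressMinDeltaThreshold();
    boolean waitedLongEnough = nowMs - lastLogTimeMs
        >= MRJobConfUtil.getTaskProgressWaitDeltaTimeThreshold();
    if (bigEnoughDelta || waitedLongEnough) {
      lastLoggedFactor = factor;
      lastLogTimeMs = nowMs;
      return true;
    }
    return false;
  }
}
```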
+ config.set("yarn.app.mapreduce.am.admin-command-opts", + TEST_JVM_SECURITY_EGD_OPT); + config.set("mapred.child.java.opts", childJVMOpts); + config.setBoolean("mapreduce.job.encrypted-intermediate-data", true); + return config; + } + + /** + * Set local directories so that the generated folders are subdirectories of + * the test directories. + * @param conf the configuration to update; a new one is created when null. + * @param testRootDir the root directory of the test. + * @return the updated configuration. + */ + public static Configuration setLocalDirectoriesConfigForTesting( + Configuration conf, File testRootDir) { + Configuration config = + (conf == null) ? new Configuration(): conf; + final File hadoopLocalDir = new File(testRootDir, "hadoop-dir"); + // create the directory + if (!hadoopLocalDir.getAbsoluteFile().mkdirs()) { + LOG.info("{} directory already exists", hadoopLocalDir.getPath()); + } + Path mapredHadoopTempDir = new Path(hadoopLocalDir.getPath()); + Path mapredSystemDir = new Path(mapredHadoopTempDir, "system"); + Path stagingDir = new Path(mapredHadoopTempDir, "tmp/staging"); + // Set the temp directories to subdirectories of the test directory. + config.set("mapreduce.jobtracker.staging.root.dir", stagingDir.toString()); + config.set("mapreduce.jobtracker.system.dir", mapredSystemDir.toString()); + config.set("mapreduce.cluster.temp.dir", mapredHadoopTempDir.toString()); + config.set("mapreduce.cluster.local.dir", + new Path(mapredHadoopTempDir, "local").toString()); + return config; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index c40bb0b19c88b..844c91c5ce081 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -1415,6 +1415,23 @@ + + mapreduce.job.end-notification.custom-notifier-class + A class to be invoked in order to send a notification after the + job has completed (success/failure). The class must implement + org.apache.hadoop.mapreduce.CustomJobEndNotifier. A notification + URL still has to be set; it will be passed to the notifyOnce + method of your implementation along with the job's configuration. + If this is set, a new instance of this class is created instead of + using a simple HttpURLConnection. For now this still only works + with HTTP/HTTPS URLs, but by implementing this class you can choose + how you make the notification itself. For example, you can use a + custom HTTP library, perform delegation token authentication, set a + custom SSL context on the connection, etc. + The class needs to have a no-arg constructor.
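The mapreduce.job.end-notification.custom-notifier-class entry above describes a pluggable job-end notification contract. A minimal sketch of an implementation follows; the notifyOnce(URL, Configuration) signature is inferred from the description rather than copied from the interface, and the class name and timeout value are illustrative only.

```java
// A minimal sketch of a custom job-end notifier, assuming the interface exposes
// a boolean notifyOnce(URL, Configuration) method as the description implies.
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.CustomJobEndNotifier;

public class LoggingJobEndNotifier implements CustomJobEndNotifier {

  // The framework instantiates this class, so a no-arg constructor is required.
  public LoggingJobEndNotifier() {
  }

  @Override
  public boolean notifyOnce(URL url, Configuration jobConf) throws Exception {
    // Open the notification URL ourselves instead of relying on the default
    // HttpURLConnection handling; a real implementation could plug in a custom
    // HTTP library, delegation tokens, or a custom SSL context here.
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout(5000); // arbitrary for the sketch
    try {
      return conn.getResponseCode() == HttpURLConnection.HTTP_OK;
    } finally {
      conn.disconnect();
    }
  }
}
```

The implementing class is then named in mapreduce.job.end-notification.custom-notifier-class, while mapreduce.job.end-notification.url still has to be set so the framework knows where to send the notification.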
+ + + mapreduce.job.log4j-properties-file diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java index 879cd3df321e6..0bf29a500661f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileInputFormat.java @@ -43,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @RunWith(value = Parameterized.class) public class TestFileInputFormat { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java new file mode 100644 index 0000000000000..c818a25f2f574 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapred; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.test.AbstractHadoopTestBase; +import org.apache.hadoop.test.GenericTestUtils; + +/** + * Test that the executor service has been shut down + * when the LocatedFileStatusFetcher is interrupted. 
+ */ +public class TestLocatedFileStatusFetcher extends AbstractHadoopTestBase { + + private Configuration conf; + private FileSystem fileSys; + private boolean mkdirs; + private File dir = GenericTestUtils.getTestDir("test-lfs-fetcher"); + private static final CountDownLatch LATCH = new CountDownLatch(1); + + @Before + public void setup() throws Exception { + conf = new Configuration(false); + conf.set("fs.file.impl", MockFileSystem.class.getName()); + fileSys = FileSystem.getLocal(conf); + } + + @After + public void after() { + if (mkdirs) { + FileUtil.fullyDelete(dir); + } + } + + @Test + public void testExecutorsShutDown() throws Exception { + Path scanPath = new Path(dir.getAbsolutePath()); + mkdirs = fileSys.mkdirs(scanPath); + Path[] dirs = new Path[] {scanPath}; + final LocatedFileStatusFetcher fetcher = new LocatedFileStatusFetcher(conf, + dirs, true, new PathFilter() { + @Override + public boolean accept(Path path) { + return true; + } + }, true); + + Thread t = new Thread() { + @Override + public void run() { + try { + fetcher.getFileStatuses(); + } catch (Exception e) { + // This should interrupt condition.await() + Assert.assertTrue(e instanceof InterruptedException); + } + } + }; + + t.start(); + LATCH.await(); + + t.interrupt(); + t.join(); + // Check the status for executor service + Assert.assertTrue("The executor service should have been shut down", + fetcher.getListeningExecutorService().isShutdown()); + } + + static class MockFileSystem extends LocalFileSystem { + @Override + public FileStatus[] globStatus(Path pathPattern, PathFilter filter) + throws IOException { + // The executor service now is running tasks + LATCH.countDown(); + try { + // Try to sleep some time to + // let LocatedFileStatusFetcher#getFileStatuses be interrupted before + // the getting file info task finishes. 
+ Thread.sleep(5000); + } catch (InterruptedException e) { + // Ignore this exception + } + return super.globStatus(pathPattern, filter); + } + + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java index bbfe2fb03c37b..c49d77108b0f0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java @@ -399,6 +399,7 @@ private void testOriginalPathWithTrailingSlash(Path path, Path expectedRemotePath) throws IOException, URISyntaxException { Path dstPath = new Path("hdfs://localhost:1234/home/hadoop/"); DistributedFileSystem fs = mock(DistributedFileSystem.class); + when(fs.makeQualified(any(Path.class))).thenReturn(dstPath); // make sure that FileUtils.copy() doesn't try to copy anything when(fs.mkdirs(any(Path.class))).thenReturn(false); when(fs.getUri()).thenReturn(dstPath.toUri()); @@ -407,6 +408,7 @@ private void testOriginalPathWithTrailingSlash(Path path, JobConf jConf = new JobConf(); Path originalPath = spy(path); FileSystem localFs = mock(FileSystem.class); + when(localFs.makeQualified(any(Path.class))).thenReturn(path); FileStatus fileStatus = mock(FileStatus.class); when(localFs.getFileStatus(any(Path.class))).thenReturn(fileStatus); when(fileStatus.isDirectory()).thenReturn(true); @@ -420,8 +422,14 @@ private void testOriginalPathWithTrailingSlash(Path path, originalPath, jConf, (short) 1); ArgumentCaptor pathCaptor = ArgumentCaptor.forClass(Path.class); - verify(fs).makeQualified(pathCaptor.capture()); - Assert.assertEquals("Path", expectedRemotePath, pathCaptor.getValue()); + verify(fs, times(2)).makeQualified(pathCaptor.capture()); + List paths = pathCaptor.getAllValues(); + // first call is invoked on a path which was created by the test, + // but the second one is created in copyRemoteFiles() + Assert.assertEquals("Expected remote path", + expectedRemotePath, paths.get(0)); + Assert.assertEquals("Expected remote path", + expectedRemotePath, paths.get(1)); } private void testErasureCodingSetting(boolean defaultBehavior) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java index 3897a9b2b3b93..f8ae7156a7a45 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java @@ -23,8 +23,7 @@ import java.util.Collection; import java.util.List; import java.util.Set; - -import javax.annotation.Nullable; +import java.util.stream.Collectors; import org.junit.Assert; @@ -49,10 +48,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Function; -import com.google.common.collect.Iterables; -import 
com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @RunWith(value = Parameterized.class) public class TestFileInputFormat { @@ -353,13 +350,10 @@ public static void verifyFileStatuses(List expectedPaths, List fetchedStatuses, final FileSystem localFs) { Assert.assertEquals(expectedPaths.size(), fetchedStatuses.size()); - Iterable fqExpectedPaths = Iterables.transform(expectedPaths, - new Function() { - @Override - public Path apply(Path input) { - return localFs.makeQualified(input); - } - }); + Iterable fqExpectedPaths = + expectedPaths.stream().map( + input -> localFs.makeQualified(input)).collect(Collectors.toList()); + Set expectedPathSet = Sets.newHashSet(fqExpectedPaths); for (FileStatus fileStatus : fetchedStatuses) { @@ -374,13 +368,10 @@ public Path apply(Path input) { private void verifySplits(List expected, List splits) { - Iterable pathsFromSplits = Iterables.transform(splits, - new Function() { - @Override - public String apply(@Nullable InputSplit input) { - return ((FileSplit) input).getPath().toString(); - } - }); + Iterable pathsFromSplits = + splits.stream().map( + input-> ((FileSplit) input).getPath().toString()) + .collect(Collectors.toList()); Set expectedSet = Sets.newHashSet(expected); for (String splitPathString : pathsFromSplits) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java index 13cb6b32214fb..bd90941fa13cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java @@ -24,6 +24,7 @@ import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -50,43 +51,60 @@ import org.apache.hadoop.mapred.Merger.Segment; import org.apache.hadoop.mapred.RawKeyValueIterator; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; import org.junit.Assert; import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; public class 
TestMerger { - - private Configuration conf; + private static File testRootDir; + @Rule + public TestName unitTestName = new TestName(); + private File unitTestDir; private JobConf jobConf; private FileSystem fs; + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir( + TestMerger.class); + } + @Before public void setup() throws IOException { - conf = new Configuration(); + unitTestDir = new File(testRootDir, unitTestName.getMethodName()); + unitTestDir.mkdirs(); jobConf = new JobConf(); - fs = FileSystem.getLocal(conf); + // Set the temp directories a subdir of the test directory. + MRJobConfUtil.setLocalDirectoriesConfigForTesting(jobConf, unitTestDir); + jobConf.set(MRConfig.FRAMEWORK_NAME, "local"); + fs = FileSystem.getLocal(jobConf); } - @Test public void testEncryptedMerger() throws Throwable { - jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + // Enable intermediate encryption. + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(jobConf); Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); TokenCache.setEncryptedSpillKey(new byte[16], credentials); UserGroupInformation.getCurrentUser().addCredentials(credentials); @@ -106,8 +124,8 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { LocalDirAllocator lda = new LocalDirAllocator(MRConfig.LOCAL_DIR); MergeManagerImpl mergeManager = new MergeManagerImpl( - reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, - null, null, new Progress(), new MROutputFiles()); + reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, + null, null, null, new Progress(), new MROutputFiles()); // write map outputs Map map1 = new TreeMap(); @@ -115,12 +133,12 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { map1.put("carrot", "delicious"); Map map2 = new TreeMap(); map1.put("banana", "pretty good"); - byte[] mapOutputBytes1 = writeMapOutput(conf, map1); - byte[] mapOutputBytes2 = writeMapOutput(conf, map2); + byte[] mapOutputBytes1 = writeMapOutput(jobConf, map1); + byte[] mapOutputBytes2 = writeMapOutput(jobConf, map2); InMemoryMapOutput mapOutput1 = new InMemoryMapOutput( - conf, mapId1, mergeManager, mapOutputBytes1.length, null, true); + jobConf, mapId1, mergeManager, mapOutputBytes1.length, null, true); InMemoryMapOutput mapOutput2 = new InMemoryMapOutput( - conf, mapId2, mergeManager, mapOutputBytes2.length, null, true); + jobConf, mapId2, mergeManager, mapOutputBytes2.length, null, true); System.arraycopy(mapOutputBytes1, 0, mapOutput1.getMemory(), 0, mapOutputBytes1.length); System.arraycopy(mapOutputBytes2, 0, mapOutput2.getMemory(), 0, @@ -150,12 +168,12 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { map3.put("carrot", "amazing"); Map map4 = new TreeMap(); map4.put("banana", "bla"); - byte[] mapOutputBytes3 = writeMapOutput(conf, map3); - byte[] mapOutputBytes4 = writeMapOutput(conf, map4); + byte[] mapOutputBytes3 = writeMapOutput(jobConf, map3); + byte[] mapOutputBytes4 = writeMapOutput(jobConf, map4); InMemoryMapOutput mapOutput3 = new InMemoryMapOutput( - conf, mapId3, mergeManager, mapOutputBytes3.length, null, true); + jobConf, mapId3, mergeManager, mapOutputBytes3.length, null, true); InMemoryMapOutput mapOutput4 = new InMemoryMapOutput( - conf, mapId4, mergeManager, mapOutputBytes4.length, null, true); + jobConf, mapId4, mergeManager, 
mapOutputBytes4.length, null, true); System.arraycopy(mapOutputBytes3, 0, mapOutput3.getMemory(), 0, mapOutputBytes3.length); System.arraycopy(mapOutputBytes4, 0, mapOutput4.getMemory(), 0, @@ -174,12 +192,13 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { Assert.assertEquals(2, mergeManager.onDiskMapOutputs.size()); List paths = new ArrayList(); - Iterator iterator = mergeManager.onDiskMapOutputs.iterator(); + Iterator iterator = + mergeManager.onDiskMapOutputs.iterator(); List keys = new ArrayList(); List values = new ArrayList(); while (iterator.hasNext()) { CompressAwarePath next = iterator.next(); - readOnDiskMapOutput(conf, fs, next, keys, values); + readOnDiskMapOutput(jobConf, fs, next, keys, values); paths.add(next); } assertThat(keys).isEqualTo(Arrays.asList("apple", "banana", "carrot", @@ -189,8 +208,8 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { mergeManager.close(); mergeManager = new MergeManagerImpl( - reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, - null, null, new Progress(), new MROutputFiles()); + reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, + null, null, null, new Progress(), new MROutputFiles()); MergeThread onDiskMerger = mergeManager.createOnDiskMerger(); onDiskMerger.merge(paths); @@ -199,7 +218,8 @@ public void testInMemoryAndOnDiskMerger() throws Throwable { keys = new ArrayList(); values = new ArrayList(); - readOnDiskMapOutput(conf, fs, mergeManager.onDiskMapOutputs.iterator().next(), keys, values); + readOnDiskMapOutput(jobConf, fs, + mergeManager.onDiskMapOutputs.iterator().next(), keys, values); assertThat(keys).isEqualTo(Arrays.asList("apple", "apple", "banana", "banana", "carrot", "carrot")); assertThat(values).isEqualTo(Arrays.asList("awesome", "disgusting", @@ -227,7 +247,8 @@ private byte[] writeMapOutput(Configuration conf, Map keysToValu private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path, List keys, List values) throws IOException { - FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path)); + FSDataInputStream in = + IntermediateEncryptedStream.wrapIfNecessary(conf, fs.open(path), path); IFile.Reader reader = new IFile.Reader(conf, in, fs.getFileStatus(path).getLen(), null, null); @@ -257,14 +278,16 @@ public void testUncompressed() throws IOException { @SuppressWarnings( { "unchecked" }) public void testMergeShouldReturnProperProgress( List> segments) throws IOException { - Path tmpDir = new Path("localpath"); + + Path tmpDir = new Path(jobConf.get("mapreduce.cluster.temp.dir"), + "localpath"); Class keyClass = (Class) jobConf.getMapOutputKeyClass(); Class valueClass = (Class) jobConf.getMapOutputValueClass(); RawComparator comparator = jobConf.getOutputKeyComparator(); Counter readsCounter = new Counter(); Counter writesCounter = new Counter(); Progress mergePhase = new Progress(); - RawKeyValueIterator mergeQueue = Merger.merge(conf, fs, keyClass, + RawKeyValueIterator mergeQueue = Merger.merge(jobConf, fs, keyClass, valueClass, segments, 2, tmpDir, comparator, getReporter(), readsCounter, writesCounter, mergePhase); final float epsilon = 0.00001f; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml index ee34f2c134403..c2082a5113811 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-hs-plugins - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce HistoryServer Plugins diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml index 3be14c09d46d8..6975716d20d35 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-hs - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce HistoryServer diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java index 69f483166e6e4..2315435e00263 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CachedHistoryStorage.java @@ -24,12 +24,12 @@ import java.util.SortedMap; import java.util.TreeMap; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.cache.Weigher; -import com.google.common.util.concurrent.UncheckedExecutionException; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.Weigher; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; @@ -42,7 +42,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java index 3e238cbc0f6a7..4908523275473 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java @@ -23,7 +23,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol; import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocolPB; @@ -93,7 +93,7 @@ private static HSAdminRefreshProtocol createHSProxyWithHSAdminRefreshProtocol( private static Object createHSProxy(InetSocketAddress address, Configuration conf, UserGroupInformation ugi, Class xface, int rpcTimeout) throws IOException { - RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine2.class); Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address, ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout); return proxy; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index ccccaeb70dce1..40faf063f9881 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -90,7 +90,7 @@ import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApps; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java index 825fb259dc9df..365e755848d72 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java @@ -70,8 +70,8 @@ import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; import org.slf4j.Logger; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index 7bb4b52f8449b..2cdde81bf5e2d 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -52,8 +52,8 @@ import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; import org.apache.hadoop.yarn.util.Clock; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java index 9ea140fa3166c..ddf449e05438f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java @@ -44,7 +44,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,7 +64,6 @@ public class JobHistoryServer extends CompositeService { private static final Logger LOG = LoggerFactory.getLogger(JobHistoryServer.class); - protected HistoryContext historyContext; private HistoryClientService clientService; private JobHistory jobHistoryService; protected JHSDelegationTokenSecretManager jhsDTSecretManager; @@ -128,7 +127,6 @@ protected void serviceInit(Configuration conf) throws Exception { throw new YarnRuntimeException("History Server Failed to login", ie); } jobHistoryService = new JobHistory(); - historyContext = (HistoryContext)jobHistoryService; stateStore = createStateStore(conf); this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore); clientService = createHistoryClientService(); @@ -152,8 +150,7 @@ protected void serviceInit(Configuration conf) throws Exception { @VisibleForTesting protected HistoryClientService createHistoryClientService() { - return new HistoryClientService(historyContext, - this.jhsDTSecretManager); + return new HistoryClientService(jobHistoryService, this.jhsDTSecretManager); } protected JHSDelegationTokenSecretManager createJHSSecretManager( diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java index aa03bb6940cba..17d0abd467f47 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.security.AccessControlException; @@ -50,7 +50,7 @@ import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.HSAdminRefreshProtocolService; import org.apache.hadoop.mapreduce.v2.hs.protocolPB.HSAdminRefreshProtocolServerSideTranslatorPB; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -81,7 +81,7 @@ public HSAdminServer(AggregatedLogDeletionService aggLogDelService, @Override public void serviceInit(Configuration conf) throws Exception { RPC.setProtocolEngine(conf, RefreshUserMappingsProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB( this); @@ -154,7 +154,7 @@ protected void serviceStop() throws Exception { private void addProtocol(Configuration conf, Class protocol, BlockingService blockingService) throws IOException { - RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine2.class); clientRpcServer.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, blockingService); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java index ba93df92b1f88..dc18b648410b4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java @@ -77,7 +77,7 @@ import org.apache.hadoop.yarn.webapp.NotFoundException; import org.apache.hadoop.yarn.webapp.WebApp; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.inject.Inject; @Path("/ws/v1/history") @@ -434,10 +434,12 @@ public Response getAggregatedLogsMeta(@Context HttpServletRequest hsr, @QueryParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) - @DefaultValue("false") boolean redirectedFromNode) { + @DefaultValue("false") boolean redirectedFromNode, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { init(); return logServlet.getLogsInfo(hsr, appIdStr, appAttemptIdStr, - containerIdStr, nmId, redirectedFromNode); + containerIdStr, nmId, redirectedFromNode, 
manualRedirection); } @GET @@ -449,14 +451,16 @@ public Response getContainerLogs(@Context HttpServletRequest hsr, @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) - @DefaultValue("false") boolean redirectedFromNode) { + @DefaultValue("false") boolean redirectedFromNode, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { init(); WrappedLogMetaRequest.Builder logMetaRequestBuilder = LogServlet.createRequestFromContainerId(containerIdStr); return logServlet.getContainerLogsInfo(hsr, logMetaRequestBuilder, nmId, - redirectedFromNode, null); + redirectedFromNode, null, manualRedirection); } @GET @@ -474,10 +478,12 @@ public Response getContainerLogFile(@Context HttpServletRequest req, String size, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) - @DefaultValue("false") boolean redirectedFromNode) { + @DefaultValue("false") boolean redirectedFromNode, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { init(); return logServlet.getLogFile(req, containerIdStr, filename, format, size, - nmId, redirectedFromNode, null); + nmId, redirectedFromNode, null, manualRedirection); } @VisibleForTesting diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java index d12a7295e6a41..63e90d1d90c58 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java @@ -28,7 +28,7 @@ import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java index f1d11a31afb42..b4222fb1cc34f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java @@ -40,7 +40,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.ApplicationId; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class MockHistoryJobs extends MockJobs { diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java index 1208bacb21fc6..a71b709990c1e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java index e9f1ba6e305d5..7d4ca5975bcb2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java @@ -24,7 +24,7 @@ import java.util.LinkedList; import java.util.List; -import com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java index 22aa3acd9a31f..1fadcedf1994e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesLogs.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapreduce.v2.hs.webapp; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 047951dbba95a..5f6c838ecea8e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-jobclient - 3.3.0-SNAPSHOT + 
3.3.1-SNAPSHOT Apache Hadoop MapReduce JobClient @@ -183,6 +183,7 @@ src/test/java/org/apache/hadoop/cli/data60bytes src/test/resources/job_1329348432655_0001-10.jhist + **/jobTokenPassword diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java index 2c2ff1f4368a8..f6720c530e356 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java @@ -80,7 +80,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class ClientServiceDelegate { private static final Logger LOG = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index 6fff0940d2268..a2f6c8580ee41 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -84,7 +84,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class ResourceMgrDelegate extends YarnClient { private static final Logger LOG = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index a9356e28a7d37..0fb3f77651e95 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -101,7 +101,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class enables the current JobClient (0.22 hadoop) to run on YARN. 
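A change repeated throughout this patch, including the jobclient classes above, is the move from unshaded Guava packages to the relocated copies under org.apache.hadoop.thirdparty. Call sites stay identical; only the imports change. The class below is a hypothetical downstream example of the resulting style, not part of the patch.

```java
// Illustrative only: the same Guava API, consumed through Hadoop's relocated
// thirdparty package so it cannot clash with an application's own Guava.
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;

import java.util.List;

public class ShadedGuavaExample {

  @VisibleForTesting
  List<String> buildDefaults() {
    // Lists.newArrayList(...) behaves exactly as the unshaded version did.
    return Lists.newArrayList("map", "reduce");
  }
}
```

Because the relocated package is bundled by hadoop-thirdparty, applications can keep their own Guava version on the classpath without conflicting with Hadoop's internal use.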
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java index 8d33b1580a8f8..f16b8a0f18fc2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java @@ -20,9 +20,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.GenericTestUtils; + import org.junit.After; import org.junit.Before; +import java.io.File; import java.io.IOException; import java.util.Map; import java.util.Properties; @@ -43,8 +46,18 @@ * The DFS filesystem is formated before the testcase starts and after it ends. */ public abstract class ClusterMapReduceTestCase { + private static File testRootDir; + private static File dfsFolder; + private MiniDFSCluster dfsCluster = null; - private MiniMRCluster mrCluster = null; + private MiniMRClientCluster mrCluster = null; + + protected static void setupClassBase(Class testClass) throws Exception { + // setup the test root directory + testRootDir = GenericTestUtils.setupTestRootDir(testClass); + dfsFolder = new File(testRootDir, "dfs"); + } + /** * Creates Hadoop Cluster and DFS before a test case is run. @@ -78,37 +91,10 @@ protected synchronized void startCluster(boolean reformatDFS, Properties props) conf.set((String) entry.getKey(), (String) entry.getValue()); } } - dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2) - .format(reformatDFS).racks(null).build(); - - ConfigurableMiniMRCluster.setConfiguration(props); - //noinspection deprecation - mrCluster = new ConfigurableMiniMRCluster(2, - getFileSystem().getUri().toString(), 1, conf); - } - } - - private static class ConfigurableMiniMRCluster extends MiniMRCluster { - private static Properties config; - - public static void setConfiguration(Properties props) { - config = props; - } - - public ConfigurableMiniMRCluster(int numTaskTrackers, String namenode, - int numDir, JobConf conf) - throws Exception { - super(0,0, numTaskTrackers, namenode, numDir, null, null, null, conf); - } - - public JobConf createJobConf() { - JobConf conf = super.createJobConf(); - if (config != null) { - for (Map.Entry entry : config.entrySet()) { - conf.set((String) entry.getKey(), (String) entry.getValue()); - } - } - return conf; + dfsCluster = + new MiniDFSCluster.Builder(conf, dfsFolder) + .numDataNodes(2).format(reformatDFS).racks(null).build(); + mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf); } } @@ -125,7 +111,7 @@ public JobConf createJobConf() { */ protected void stopCluster() throws Exception { if (mrCluster != null) { - mrCluster.shutdown(); + mrCluster.stop(); mrCluster = null; } if (dfsCluster != null) { @@ -157,17 +143,13 @@ protected FileSystem getFileSystem() throws IOException { return dfsCluster.getFileSystem(); } - protected MiniMRCluster getMRCluster() { - return mrCluster; - } - /** * Returns the path to the root directory for the testcase. * * @return path to the root directory for the testcase. 
*/ protected Path getTestRootDir() { - return new Path("x").getParent(); + return new Path(testRootDir.getPath()); } /** @@ -194,8 +176,8 @@ protected Path getOutputDir() { * * @return configuration that works on the testcase Hadoop instance */ - protected JobConf createJobConf() { - return mrCluster.createJobConf(); + protected JobConf createJobConf() throws IOException { + return new JobConf(mrCluster.getConfig()); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java index 85c534bfb888a..33b85b92042cf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java @@ -55,7 +55,8 @@ public static MiniMRClientCluster create(Class caller, String identifier, Path appJar = new Path(testRootDir, "MRAppJar.jar"); // Copy MRAppJar and make it private. - Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR); + Path appMasterJar = + new Path(MiniMRYarnCluster.copyAppJarIntoTestDir(identifier)); fs.copyFromLocalFile(appMasterJar, appJar); fs.setPermission(appJar, new FsPermission("744")); @@ -64,7 +65,7 @@ public static MiniMRClientCluster create(Class caller, String identifier, job.addFileToClassPath(appJar); - Path callerJar = new Path(JarFinder.getJar(caller)); + Path callerJar = new Path(JarFinder.getJar(caller, identifier)); Path remoteCallerJar = new Path(testRootDir, callerJar.getName()); fs.copyFromLocalFile(callerJar, remoteCallerJar); fs.setPermission(remoteCallerJar, new FsPermission("744")); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java index e7df5b304b536..2de885f861506 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java @@ -271,4 +271,8 @@ public void shutdown() { } } + public MiniMRClientCluster getMrClientCluster() { + return mrClientCluster; + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java index 4f89840f300a9..684587d7c8720 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java @@ -74,4 +74,8 @@ public void restart() { miniMRYarnCluster.start(); } + public MiniMRYarnCluster getMiniMRYarnCluster() { + return miniMRYarnCluster; + 
} + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java index b45a2a6f8cfb4..1b39583bd729f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java @@ -37,6 +37,8 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.util.ReflectionUtils; + +import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; @@ -58,7 +60,12 @@ public class TestBadRecords extends ClusterMapReduceTestCase { Arrays.asList("hello08","hello10"); private List input; - + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestBadRecords.class); + } + public TestBadRecords() { input = new ArrayList(); for(int i=1;i<=10;i++) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java index f04fbd7a29af2..b4e8de2723c57 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java @@ -29,6 +29,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; + +import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -36,6 +38,12 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertFalse; public class TestClusterMapReduceTestCase extends ClusterMapReduceTestCase { + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestClusterMapReduceTestCase.class); + } + public void _testMapReduce(boolean restart) throws Exception { OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt")); Writer wr = new OutputStreamWriter(os); @@ -88,7 +96,6 @@ public void _testMapReduce(boolean restart) throws Exception { reader.close(); assertEquals(4, counter); } - } @Test diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java index 2659a14a70b1d..f50089af4a1e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobName.java @@ -29,12 +29,19 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.lib.IdentityMapper; + 
+import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; public class TestJobName extends ClusterMapReduceTestCase { + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestJobName.class); + } + @Test public void testComplexName() throws Exception { OutputStream os = getFileSystem().create(new Path(getInputDir(), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java index a3ea26e81f0b6..c8b6c894d0c4b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLocalJobSubmission.java @@ -31,8 +31,20 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.SleepJob; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; +import org.apache.hadoop.mapreduce.security.SpillCallBackPathsFinder; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.junit.Assert.*; @@ -41,8 +53,39 @@ * -jt local -libjars */ public class TestLocalJobSubmission { - private static Path TEST_ROOT_DIR = - new Path(System.getProperty("test.build.data","/tmp")); + private static final Logger LOG = + LoggerFactory.getLogger(TestLocalJobSubmission.class); + + private static File testRootDir; + + @Rule + public TestName unitTestName = new TestName(); + private File unitTestDir; + private Path jarPath; + private Configuration config; + + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir(TestLocalJobSubmission.class); + } + + @Before + public void setup() throws IOException { + unitTestDir = new File(testRootDir, unitTestName.getMethodName()); + unitTestDir.mkdirs(); + config = createConfig(); + jarPath = makeJar(new Path(unitTestDir.getAbsolutePath(), "test.jar")); + } + + private Configuration createConfig() { + // Set the temp directories a subdir of the test directory. + Configuration conf = + MRJobConfUtil.setLocalDirectoriesConfigForTesting(null, unitTestDir); + conf.set(MRConfig.FRAMEWORK_NAME, "local"); + return conf; + } /** * Test the local job submission options of -jt local -libjars. 
@@ -51,12 +94,9 @@ public class TestLocalJobSubmission { */ @Test public void testLocalJobLibjarsOption() throws IOException { - Configuration conf = new Configuration(); - - testLocalJobLibjarsOption(conf); - - conf.setBoolean(Job.USE_WILDCARD_FOR_LIBJARS, false); - testLocalJobLibjarsOption(conf); + testLocalJobLibjarsOption(config); + config.setBoolean(Job.USE_WILDCARD_FOR_LIBJARS, false); + testLocalJobLibjarsOption(config); } /** @@ -67,8 +107,6 @@ public void testLocalJobLibjarsOption() throws IOException { */ private void testLocalJobLibjarsOption(Configuration conf) throws IOException { - Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar")); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); conf.set(MRConfig.FRAMEWORK_NAME, "local"); final String[] args = { @@ -79,8 +117,7 @@ private void testLocalJobLibjarsOption(Configuration conf) try { res = ToolRunner.run(conf, new SleepJob(), args); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); @@ -93,18 +130,20 @@ private void testLocalJobLibjarsOption(Configuration conf) */ @Test public void testLocalJobEncryptedIntermediateData() throws IOException { - Configuration conf = new Configuration(); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + config = MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(config); final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" }; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + SpillCallBackPathsFinder spillInjector = + (SpillCallBackPathsFinder) IntermediateEncryptedStream + .setSpillCBInjector(new SpillCallBackPathsFinder()); + res = ToolRunner.run(config, new SleepJob(), args); + Assert.assertTrue("No spill occurred", + spillInjector.getEncryptedSpilledFiles().size() > 0); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); @@ -116,15 +155,13 @@ public void testLocalJobEncryptedIntermediateData() throws IOException { */ @Test public void testJobMaxMapConfig() throws Exception { - Configuration conf = new Configuration(); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); - conf.setInt(MRJobConfig.JOB_MAX_MAP, 0); + config.setInt(MRJobConfig.JOB_MAX_MAP, 0); final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" }; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + res = ToolRunner.run(config, new SleepJob(), args); fail("Job should fail"); } catch (IllegalArgumentException e) { assertTrue(e.getLocalizedMessage().contains( @@ -139,20 +176,16 @@ public void testJobMaxMapConfig() throws Exception { */ @Test public void testLocalJobFilesOption() throws IOException { - Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar")); - - Configuration conf = new Configuration(); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); - final String[] args = - {"-jt", "local", "-files", jarPath.toString(), "-m", "1", "-r", "1", - "-mt", "1", "-rt", "1"}; + config.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); + final String[] args = { + "-jt", "local", 
"-files", jarPath.toString(), + "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" + }; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + res = ToolRunner.run(config, new SleepJob(), args); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}", e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); @@ -165,27 +198,22 @@ public void testLocalJobFilesOption() throws IOException { */ @Test public void testLocalJobArchivesOption() throws IOException { - Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar")); - - Configuration conf = new Configuration(); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); - conf.set(MRConfig.FRAMEWORK_NAME, "local"); + config.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000"); final String[] args = {"-jt", "local", "-archives", jarPath.toString(), "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"}; int res = -1; try { - res = ToolRunner.run(conf, new SleepJob(), args); + res = ToolRunner.run(config, new SleepJob(), args); } catch (Exception e) { - System.out.println("Job failed with " + e.getLocalizedMessage()); - e.printStackTrace(System.out); + LOG.error("Job failed with {}" + e.getLocalizedMessage(), e); fail("Job failed"); } assertEquals("dist job res is not 0:", 0, res); } private Path makeJar(Path p) throws IOException { - FileOutputStream fos = new FileOutputStream(new File(p.toString())); + FileOutputStream fos = new FileOutputStream(p.toString()); JarOutputStream jos = new JarOutputStream(fos); ZipEntry ze = new ZipEntry("test.jar.inside"); jos.putNextEntry(ze); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java index 9a2af0c7be9ea..9a2c744d8c656 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRCJCJobClient.java @@ -29,10 +29,17 @@ import org.apache.hadoop.mapreduce.TestMRJobClient; import org.apache.hadoop.mapreduce.tools.CLI; import org.apache.hadoop.util.Tool; + +import org.junit.BeforeClass; import org.junit.Ignore; @Ignore public class TestMRCJCJobClient extends TestMRJobClient { - + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestMRCJCJobClient.class); + } + private String runJob() throws Exception { OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt")); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java deleted file mode 100644 index fa8dacf6dd507..0000000000000 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java +++ /dev/null @@ -1,327 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one 
- * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.util.Arrays; -import java.util.Collection; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.junit.Assert.*; - -@SuppressWarnings(value={"unchecked", "deprecation"}) -/** - * This test tests the support for a merge operation in Hadoop. The input files - * are already sorted on the key. This test implements an external - * MapOutputCollector implementation that just copies the records to different - * partitions while maintaining the sort order in each partition. The Hadoop - * framework's merge on the reduce side will merge the partitions created to - * generate the final output which is sorted on the key. - */ -@RunWith(Parameterized.class) -public class TestMRIntermediateDataEncryption { - private static final Logger LOG = - LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class); - /** - * Use urandom to avoid the YarnChild process from hanging on low entropy - * systems. - */ - private static final String JVM_SECURITY_EGD_OPT = - "-Djava.security.egd=file:/dev/./urandom"; - // Where MR job's input will reside. - private static final Path INPUT_DIR = new Path("/test/input"); - // Where output goes. - private static final Path OUTPUT = new Path("/test/output"); - private static final int NUM_LINES = 1000; - private static MiniMRClientCluster mrCluster = null; - private static MiniDFSCluster dfsCluster = null; - private static FileSystem fs = null; - private static final int NUM_NODES = 2; - - private final String testTitle; - private final int numMappers; - private final int numReducers; - private final boolean isUber; - - /** - * List of arguments to run the JunitTest. - * @return - */ - @Parameterized.Parameters( - name = "{index}: TestMRIntermediateDataEncryption.{0} .. 
" - + "mappers:{1}, reducers:{2}, isUber:{3})") - public static Collection getTestParameters() { - return Arrays.asList(new Object[][]{ - {"testSingleReducer", 3, 1, false}, - {"testUberMode", 3, 1, true}, - {"testMultipleMapsPerNode", 8, 1, false}, - {"testMultipleReducers", 2, 4, false} - }); - } - - /** - * Initialized the parametrized JUnit test. - * @param testName the name of the unit test to be executed. - * @param mappers number of mappers in the tests. - * @param reducers number of the reducers. - * @param uberEnabled boolean flag for isUber - */ - public TestMRIntermediateDataEncryption(String testName, int mappers, - int reducers, boolean uberEnabled) { - this.testTitle = testName; - this.numMappers = mappers; - this.numReducers = reducers; - this.isUber = uberEnabled; - } - - @BeforeClass - public static void setupClass() throws Exception { - Configuration conf = new Configuration(); - conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - - // Set the jvm arguments. - conf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, - JVM_SECURITY_EGD_OPT); - final String childJVMOpts = JVM_SECURITY_EGD_OPT - + " " + conf.get("mapred.child.java.opts", " "); - conf.set("mapred.child.java.opts", childJVMOpts); - - - // Start the mini-MR and mini-DFS clusters. - dfsCluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(NUM_NODES).build(); - mrCluster = - MiniMRClientClusterFactory.create( - TestMRIntermediateDataEncryption.class, NUM_NODES, conf); - mrCluster.start(); - } - - @AfterClass - public static void tearDown() throws IOException { - if (fs != null) { - fs.close(); - } - if (mrCluster != null) { - mrCluster.stop(); - } - if (dfsCluster != null) { - dfsCluster.shutdown(); - } - } - - @Before - public void setup() throws Exception { - LOG.info("Starting TestMRIntermediateDataEncryption#{}.......", testTitle); - fs = dfsCluster.getFileSystem(); - if (fs.exists(INPUT_DIR) && !fs.delete(INPUT_DIR, true)) { - throw new IOException("Could not delete " + INPUT_DIR); - } - if (fs.exists(OUTPUT) && !fs.delete(OUTPUT, true)) { - throw new IOException("Could not delete " + OUTPUT); - } - // Generate input. 
- createInput(fs, numMappers, NUM_LINES); - } - - @After - public void cleanup() throws IOException { - if (fs != null) { - if (fs.exists(OUTPUT)) { - fs.delete(OUTPUT, true); - } - if (fs.exists(INPUT_DIR)) { - fs.delete(INPUT_DIR, true); - } - } - } - - @Test(timeout=600000) - public void testMerge() throws Exception { - JobConf job = new JobConf(mrCluster.getConfig()); - job.setJobName("Test"); - JobClient client = new JobClient(job); - RunningJob submittedJob = null; - FileInputFormat.setInputPaths(job, INPUT_DIR); - FileOutputFormat.setOutputPath(job, OUTPUT); - job.set("mapreduce.output.textoutputformat.separator", " "); - job.setInputFormat(TextInputFormat.class); - job.setMapOutputKeyClass(Text.class); - job.setMapOutputValueClass(Text.class); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(Text.class); - job.setMapperClass(TestMRIntermediateDataEncryption.MyMapper.class); - job.setPartitionerClass( - TestMRIntermediateDataEncryption.MyPartitioner.class); - job.setOutputFormat(TextOutputFormat.class); - job.setNumReduceTasks(numReducers); - job.setInt("mapreduce.map.maxattempts", 1); - job.setInt("mapreduce.reduce.maxattempts", 1); - job.setInt("mapred.test.num_lines", NUM_LINES); - job.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, isUber); - job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); - submittedJob = client.submitJob(job); - submittedJob.waitForCompletion(); - assertTrue("The submitted job is completed", submittedJob.isComplete()); - assertTrue("The submitted job is successful", submittedJob.isSuccessful()); - verifyOutput(fs, numMappers, NUM_LINES); - client.close(); - // wait for short period to cool down. - Thread.sleep(1000); - } - - private void createInput(FileSystem filesystem, int mappers, int numLines) - throws Exception { - for (int i = 0; i < mappers; i++) { - OutputStream os = - filesystem.create(new Path(INPUT_DIR, "input_" + i + ".txt")); - Writer writer = new OutputStreamWriter(os); - for (int j = 0; j < numLines; j++) { - // Create sorted key, value pairs. - int k = j + 1; - String formattedNumber = String.format("%09d", k); - writer.write(formattedNumber + " " + formattedNumber + "\n"); - } - writer.close(); - os.close(); - } - } - - private void verifyOutput(FileSystem fileSystem, - int mappers, int numLines) - throws Exception { - FSDataInputStream dis = null; - long numValidRecords = 0; - long numInvalidRecords = 0; - String prevKeyValue = "000000000"; - Path[] fileList = - FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT, - new Utils.OutputFileUtils.OutputFilesFilter())); - for (Path outFile : fileList) { - try { - dis = fileSystem.open(outFile); - String record; - while((record = dis.readLine()) != null) { - // Split the line into key and value. - int blankPos = record.indexOf(" "); - String keyString = record.substring(0, blankPos); - String valueString = record.substring(blankPos+1); - // Check for sorted output and correctness of record. - if (keyString.compareTo(prevKeyValue) >= 0 - && keyString.equals(valueString)) { - prevKeyValue = keyString; - numValidRecords++; - } else { - numInvalidRecords++; - } - } - } finally { - if (dis != null) { - dis.close(); - dis = null; - } - } - } - // Make sure we got all input records in the output in sorted order. - assertEquals((long)(mappers * numLines), numValidRecords); - // Make sure there is no extraneous invalid record. - assertEquals(0, numInvalidRecords); - } - - /** - * A mapper implementation that assumes that key text contains valid integers - * in displayable form. 
- */ - public static class MyMapper extends MapReduceBase - implements Mapper { - private Text keyText; - private Text valueText; - - public MyMapper() { - keyText = new Text(); - valueText = new Text(); - } - - @Override - public void map(LongWritable key, Text value, - OutputCollector output, - Reporter reporter) throws IOException { - String record = value.toString(); - int blankPos = record.indexOf(" "); - keyText.set(record.substring(0, blankPos)); - valueText.set(record.substring(blankPos + 1)); - output.collect(keyText, valueText); - } - - public void close() throws IOException { - } - } - - /** - * Partitioner implementation to make sure that output is in total sorted - * order. We basically route key ranges to different reducers such that - * key values monotonically increase with the partition number. For example, - * in this test, the keys are numbers from 1 to 1000 in the form "000000001" - * to "000001000" in each input file. The keys "000000001" to "000000250" are - * routed to partition 0, "000000251" to "000000500" are routed to partition 1 - * and so on since we have 4 reducers. - */ - static class MyPartitioner implements Partitioner { - - private JobConf job; - - public MyPartitioner() { - } - - public void configure(JobConf job) { - this.job = job; - } - - public int getPartition(Text key, Text value, int numPartitions) { - int keyValue = 0; - try { - keyValue = Integer.parseInt(key.toString()); - } catch (NumberFormatException nfe) { - keyValue = 0; - } - int partitionNumber = (numPartitions * (Math.max(0, keyValue - 1))) / job - .getInt("mapred.test.num_lines", 10000); - return partitionNumber; - } - } -} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java index eed731ffd37e8..c2a966302cf66 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMROpportunisticMaps.java @@ -26,6 +26,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.Test; @@ -79,7 +80,8 @@ public void doTest(int numMappers, int numReducers, int numNodes, MiniMRClientCluster mrCluster = null; FileSystem fileSystem = null; try { - Configuration conf = new Configuration(); + Configuration conf = + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); // Start the mini-MR and mini-DFS clusters conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true); conf.setBoolean(YarnConfiguration. 
@@ -149,7 +151,6 @@ private void runMergeTest(JobConf job, FileSystem fileSystem, int job.setInt("mapreduce.map.maxattempts", 1); job.setInt("mapreduce.reduce.maxattempts", 1); job.setInt("mapred.test.num_lines", numLines); - job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); try { submittedJob = client.submitJob(job); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java index a23caa56290a2..4eb0beaad2a37 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java @@ -60,7 +60,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestMRTimelineEventHandling { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java index a9e7f64c0b8cc..b8a16e146e9e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMerge.java @@ -87,12 +87,12 @@ public void testMerge() throws Exception { // Run the test. 
runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem); } finally { - if (dfsCluster != null) { - dfsCluster.shutdown(); - } if (mrCluster != null) { mrCluster.stop(); } + if (dfsCluster != null) { + dfsCluster.shutdown(); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java index 3223acca22861..ed8ed61ff9b16 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java @@ -247,7 +247,7 @@ public void testNetworkedJob() throws Exception { QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser(); assertEquals(2, aai.length); assertEquals("root", aai[0].getQueueName()); - assertEquals("default", aai[1].getQueueName()); + assertEquals("root.default", aai[1].getQueueName()); // test JobClient // The following asserts read JobStatus twice and ensure the returned diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 0c42b3daab4ef..0bdc72121799b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -47,7 +47,7 @@ import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -127,7 +127,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Test YarnRunner and make sure the client side plugin works diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java index 89ac3ea2edc5e..4a7c3283d48a3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java @@ -61,7 +61,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * Utilities used in unit test. 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java index 0bf30c830b22f..dca39dfd71f99 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java @@ -22,7 +22,7 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -30,7 +30,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat; import org.apache.hadoop.util.Tool; @@ -99,6 +98,15 @@ static int printUsage() { */ enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN } + public static String generateSentenceWithRand(ThreadLocalRandom rand, + int noWords) { + StringBuffer sentence = new StringBuffer(words[rand.nextInt(words.length)]); + for (int i = 1; i < noWords; i++) { + sentence.append(" ").append(words[rand.nextInt(words.length)]); + } + return sentence.toString(); + } + static class RandomTextMapper extends Mapper { private long numBytesToWrite; @@ -106,7 +114,6 @@ static class RandomTextMapper extends Mapper { private int wordsInKeyRange; private int minWordsInValue; private int wordsInValueRange; - private Random random = new Random(); /** * Save the configuration value that we need to write the data. @@ -127,12 +134,13 @@ public void setup(Context context) { public void map(Text key, Text value, Context context) throws IOException,InterruptedException { int itemCount = 0; + ThreadLocalRandom rand = ThreadLocalRandom.current(); while (numBytesToWrite > 0) { // Generate the key/value - int noWordsKey = minWordsInKey + - (wordsInKeyRange != 0 ? random.nextInt(wordsInKeyRange) : 0); - int noWordsValue = minWordsInValue + - (wordsInValueRange != 0 ? random.nextInt(wordsInValueRange) : 0); + int noWordsKey = minWordsInKey + + (wordsInKeyRange != 0 ? rand.nextInt(wordsInKeyRange) : 0); + int noWordsValue = minWordsInValue + + (wordsInValueRange != 0 ? 
rand.nextInt(wordsInValueRange) : 0); Text keyWords = generateSentence(noWordsKey); Text valueWords = generateSentence(noWordsValue); @@ -154,13 +162,9 @@ public void map(Text key, Text value, } private Text generateSentence(int noWords) { - StringBuffer sentence = new StringBuffer(); - String space = " "; - for (int i=0; i < noWords; ++i) { - sentence.append(words[random.nextInt(words.length)]); - sentence.append(space); - } - return new Text(sentence.toString()); + String sentence = + generateSentenceWithRand(ThreadLocalRandom.current(), noWords); + return new Text(sentence); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java new file mode 100644 index 0000000000000..fbee7ef5c0f47 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRIntermediateDataEncryption.java @@ -0,0 +1,533 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.StringTokenizer; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; + +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.MiniMRClientCluster; +import org.apache.hadoop.mapred.MiniMRClientClusterFactory; +import org.apache.hadoop.mapred.Utils; + +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; +import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; +import org.apache.hadoop.mapreduce.security.IntermediateEncryptedStream; +import org.apache.hadoop.mapreduce.security.SpillCallBackPathsFinder; +import org.apache.hadoop.mapreduce.util.MRJobConfUtil; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.ToolRunner; + +/** + * This class tests the support of Intermediate data encryption + * (Spill data encryption). + * It starts by generating random input text file ({@link RandomTextWriter}) + * using the {@link ToolRunner}. + * A wordCount job consumes the generated input. The final job is configured in + * a way to guarantee that data is spilled. + * mbs-per-map specifies the amount of data (in MBs) to generate per map. + * By default, this is twice the value of mapreduce.task.io.sort.mb + * map-tasks specifies the number of map tasks to run. + * Steps of the unit test: + * 1- Generating random input text. + * 2- Run a job with encryption disabled. Get the checksum of the output file + * checkSumReference. + * 3- Run the job with encryption enabled. + * 4- Compare checkSumReference to the checksum of the job output. + * 5- If the job has multiple reducers, the test launches one final job to + * combine the output files into a single one. + * 6- Verify that the maps spilled files. + */ +@RunWith(Parameterized.class) +public class TestMRIntermediateDataEncryption { + public static final Logger LOG = + LoggerFactory.getLogger(TestMRIntermediateDataEncryption.class); + /** + * The number of bytes generated by the input generator. + */ + public static final long TOTAL_MBS_DEFAULT = 128L; + public static final long BLOCK_SIZE_DEFAULT = 32 * 1024 * 1024L; + public static final int INPUT_GEN_NUM_THREADS = 16; + public static final long TASK_SORT_IO_MB_DEFAULT = 128L; + public static final String JOB_DIR_PATH = "jobs-data-path"; + /** + * Directory of the test data. 
+ */ + private static File testRootDir; + private static volatile BufferedWriter inputBufferedWriter; + private static Configuration commonConfig; + private static MiniDFSCluster dfsCluster; + private static MiniMRClientCluster mrCluster; + private static FileSystem fs; + private static FileChecksum checkSumReference; + private static Path jobInputDirPath; + private static long inputFileSize; + /** + * Test parameters. + */ + private final String testTitleName; + private final int numMappers; + private final int numReducers; + private final boolean isUber; + private Configuration config; + private Path jobOutputPath; + + /** + * Initializes the parameterized JUnit test. + * @param testName the name of the unit test to be executed. + * @param mappers number of mappers in the tests. + * @param reducers number of the reducers. + * @param uberEnabled boolean flag for isUber + */ + public TestMRIntermediateDataEncryption(String testName, int mappers, + int reducers, boolean uberEnabled) { + this.testTitleName = testName; + this.numMappers = mappers; + this.numReducers = reducers; + this.isUber = uberEnabled; + } + + /** + * List of arguments to run the parameterized JUnit test. + * @return the collection of test parameters. + */ + @Parameterized.Parameters( + name = "{index}: TestMRIntermediateDataEncryption.{0} .. " + + "mappers:{1}, reducers:{2}, isUber:{3})") + public static Collection<Object[]> getTestParameters() { + return Arrays.asList(new Object[][]{ + {"testSingleReducer", 3, 1, false}, + {"testUberMode", 3, 1, true}, + {"testMultipleMapsPerNode", 8, 1, false}, + {"testMultipleReducers", 2, 4, false} + }); + } + + @BeforeClass + public static void setupClass() throws Exception { + // setup the test root directory + testRootDir = + GenericTestUtils.setupTestRootDir( + TestMRIntermediateDataEncryption.class); + // setup the base configurations and the clusters + final File dfsFolder = new File(testRootDir, "dfs"); + final Path jobsDirPath = new Path(JOB_DIR_PATH); + + commonConfig = createBaseConfiguration(); + dfsCluster = + new MiniDFSCluster.Builder(commonConfig, dfsFolder) + .numDataNodes(2).build(); + dfsCluster.waitActive(); + mrCluster = MiniMRClientClusterFactory.create( + TestMRIntermediateDataEncryption.class, 2, commonConfig); + mrCluster.start(); + fs = dfsCluster.getFileSystem(); + if (fs.exists(jobsDirPath) && !fs.delete(jobsDirPath, true)) { + throw new IOException("Could not delete JobsDirPath " + jobsDirPath); + } + fs.mkdirs(jobsDirPath); + jobInputDirPath = new Path(jobsDirPath, "in-dir"); + // run the input generator job. + Assert.assertEquals("Generating input should succeed", 0, + generateInputTextFile()); + // run the reference job + runReferenceJob(); + } + + @AfterClass + public static void tearDown() throws IOException { + // shutdown clusters + if (mrCluster != null) { + mrCluster.stop(); + } + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + // make sure that generated input file is deleted + final File textInputFile = new File(testRootDir, "input.txt"); + if (textInputFile.exists()) { + Assert.assertTrue(textInputFile.delete()); + } + } + + /** + * Creates a configuration object setting the common properties before + * initializing the clusters. + * @return configuration to be used as a base for the unit tests. + */ + private static Configuration createBaseConfiguration() { + // Set the jvm arguments to enable intermediate encryption. + Configuration conf = + MRJobConfUtil.initEncryptedIntermediateConfigsForTesting(null); + // Set the temp directories a subDir of the test directory.
+ conf = MRJobConfUtil.setLocalDirectoriesConfigForTesting(conf, testRootDir); + conf.setLong("dfs.blocksize", BLOCK_SIZE_DEFAULT); + return conf; + } + + /** + * Creates a thread safe BufferedWriter to be used among the task generators. + * @return A synchronized BufferedWriter to the input file. + * @throws IOException opening a new {@link FileWriter}. + */ + private static synchronized BufferedWriter getTextInputWriter() + throws IOException { + if (inputBufferedWriter == null) { + final File textInputFile = new File(testRootDir, "input.txt"); + inputBufferedWriter = new BufferedWriter(new FileWriter(textInputFile)); + } + return inputBufferedWriter; + } + + /** + * Generates input text file of size TOTAL_MBS_DEFAULT. + * It creates a total INPUT_GEN_NUM_THREADS future tasks. + * + * @return the result of the input generation. 0 for success. + * @throws Exception during the I/O of job. + */ + private static int generateInputTextFile() throws Exception { + final File textInputFile = new File(testRootDir, "input.txt"); + final AtomicLong actualWrittenBytes = new AtomicLong(0); + // create INPUT_GEN_NUM_THREADS callables + final ExecutorService executor = + Executors.newFixedThreadPool(INPUT_GEN_NUM_THREADS); + //create a list to hold the Future object associated with Callable + final List> inputGenerators = new ArrayList<>(); + final Callable callableGen = new InputGeneratorTask(); + final long startTime = Time.monotonicNow(); + for (int i = 0; i < INPUT_GEN_NUM_THREADS; i++) { + //submit Callable tasks to be executed by thread pool + Future genFutureTask = executor.submit(callableGen); + inputGenerators.add(genFutureTask); + } + for (Future genFutureTask : inputGenerators) { + // print the return value of Future, notice the output delay in console + // because Future.get() waits for task to get completed + LOG.info("Received one task. Current total bytes: {}", + actualWrittenBytes.addAndGet(genFutureTask.get())); + } + getTextInputWriter().close(); + final long endTime = Time.monotonicNow(); + LOG.info("Finished generating input. Wrote {} bytes in {} seconds", + actualWrittenBytes.get(), ((endTime - startTime) * 1.0) / 1000); + executor.shutdown(); + // copy text file to HDFS deleting the source. + fs.mkdirs(jobInputDirPath); + Path textInputPath = + fs.makeQualified(new Path(jobInputDirPath, "input.txt")); + fs.copyFromLocalFile(true, new Path(textInputFile.getAbsolutePath()), + textInputPath); + if (!fs.exists(textInputPath)) { + // the file was not generated. Fail. + return 1; + } + // update the input size. + FileStatus[] fileStatus = + fs.listStatus(textInputPath); + inputFileSize = fileStatus[0].getLen(); + LOG.info("Text input file; path: {}, size: {}", + textInputPath, inputFileSize); + return 0; + } + + /** + * Runs a WordCount job with encryption disabled and stores the checksum of + * the output file. + * @throws Exception due to I/O errors. 
+ */ + private static void runReferenceJob() throws Exception { + final String jobRefLabel = "job-reference"; + final Path jobRefDirPath = new Path(JOB_DIR_PATH, jobRefLabel); + if (fs.exists(jobRefDirPath) && !fs.delete(jobRefDirPath, true)) { + throw new IOException("Could not delete " + jobRefDirPath); + } + Assert.assertTrue(fs.mkdirs(jobRefDirPath)); + Path jobRefOutputPath = new Path(jobRefDirPath, "out-dir"); + Configuration referenceConf = new Configuration(commonConfig); + referenceConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, false); + Job jobReference = runWordCountJob(jobRefLabel, jobRefOutputPath, + referenceConf, 4, 1); + Assert.assertTrue(jobReference.isSuccessful()); + FileStatus[] fileStatusArr = + fs.listStatus(jobRefOutputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + Assert.assertEquals(1, fileStatusArr.length); + checkSumReference = fs.getFileChecksum(fileStatusArr[0].getPath()); + Assert.assertTrue(fs.delete(jobRefDirPath, true)); + } + + private static Job runWordCountJob(String postfixName, Path jOutputPath, + Configuration jConf, int mappers, int reducers) throws Exception { + Job job = Job.getInstance(jConf); + job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, mappers); + job.setJarByClass(TestMRIntermediateDataEncryption.class); + job.setJobName("mr-spill-" + postfixName); + // Mapper configuration + job.setMapperClass(TokenizerMapper.class); + job.setInputFormatClass(TextInputFormat.class); + job.setCombinerClass(LongSumReducer.class); + FileInputFormat.setMinInputSplitSize(job, + (inputFileSize + mappers) / mappers); + // Reducer configuration + job.setReducerClass(LongSumReducer.class); + job.setNumReduceTasks(reducers); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(LongWritable.class); + // Set the IO paths for the job. + FileInputFormat.addInputPath(job, jobInputDirPath); + FileOutputFormat.setOutputPath(job, jOutputPath); + if (job.waitForCompletion(true)) { + FileStatus[] fileStatusArr = + fs.listStatus(jOutputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + for (FileStatus fStatus : fileStatusArr) { + LOG.info("Job: {} .. Output file {} .. Size = {}", + postfixName, fStatus.getPath(), fStatus.getLen()); + } + } + return job; + } + + /** + * Compares the checksum of the output file to the + * checkSumReference. + * If the job has a multiple reducers, the output files are combined by + * launching another job. + * @return true if the checksums are equal. + * @throws Exception if the output is missing or the combiner job fails. 
+ */ + private boolean validateJobOutput() throws Exception { + Assert.assertTrue("Job Output path [" + jobOutputPath + "] should exist", + fs.exists(jobOutputPath)); + Path outputPath = jobOutputPath; + if (numReducers != 1) { + // combine the result into one file by running a combiner job + final String jobRefLabel = testTitleName + "-combine"; + final Path jobRefDirPath = new Path(JOB_DIR_PATH, jobRefLabel); + if (fs.exists(jobRefDirPath) && !fs.delete(jobRefDirPath, true)) { + throw new IOException("Could not delete " + jobRefDirPath); + } + fs.mkdirs(jobRefDirPath); + outputPath = new Path(jobRefDirPath, "out-dir"); + Configuration referenceConf = new Configuration(commonConfig); + referenceConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, + false); + Job combinerJob = Job.getInstance(referenceConf); + combinerJob.setJarByClass(TestMRIntermediateDataEncryption.class); + combinerJob.setJobName("mr-spill-" + jobRefLabel); + combinerJob.setMapperClass(CombinerJobMapper.class); + FileInputFormat.addInputPath(combinerJob, jobOutputPath); + // Reducer configuration + combinerJob.setReducerClass(LongSumReducer.class); + combinerJob.setNumReduceTasks(1); + combinerJob.setOutputKeyClass(Text.class); + combinerJob.setOutputValueClass(LongWritable.class); + // Set the IO paths for the job. + FileOutputFormat.setOutputPath(combinerJob, outputPath); + if (!combinerJob.waitForCompletion(true)) { + return false; + } + FileStatus[] fileStatusArr = + fs.listStatus(outputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + LOG.info("Job-Combination: {} .. Output file {} .. Size = {}", + jobRefDirPath, fileStatusArr[0].getPath(), fileStatusArr[0].getLen()); + } + // Get the output files of the job. + FileStatus[] fileStatusArr = + fs.listStatus(outputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + FileChecksum jobFileChecksum = + fs.getFileChecksum(fileStatusArr[0].getPath()); + return checkSumReference.equals(jobFileChecksum); + } + + @Before + public void setup() throws Exception { + LOG.info("Starting TestMRIntermediateDataEncryption#{}.......", + testTitleName); + final Path jobDirPath = new Path(JOB_DIR_PATH, testTitleName); + if (fs.exists(jobDirPath) && !fs.delete(jobDirPath, true)) { + throw new IOException("Could not delete " + jobDirPath); + } + fs.mkdirs(jobDirPath); + jobOutputPath = new Path(jobDirPath, "out-dir"); + // Set the configuration for the job. + config = new Configuration(commonConfig); + config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, isUber); + config.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0F); + // Set the configuration to make sure that we get spilled files. + long ioSortMb = TASK_SORT_IO_MB_DEFAULT; + config.setLong(MRJobConfig.IO_SORT_MB, ioSortMb); + long mapMb = Math.max(2 * ioSortMb, config.getInt(MRJobConfig.MAP_MEMORY_MB, + MRJobConfig.DEFAULT_MAP_MEMORY_MB)); + // Make sure the map tasks will spill to disk. + config.setLong(MRJobConfig.MAP_MEMORY_MB, mapMb); + config.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx" + (mapMb - 200) + "m"); + config.setInt(MRJobConfig.NUM_MAPS, numMappers); + // Max attempts have to be set to 1 when intermediate encryption is enabled. 
+ config.setInt("mapreduce.map.maxattempts", 1); + config.setInt("mapreduce.reduce.maxattempts", 1); + } + + @Test + public void testWordCount() throws Exception { + LOG.info("........Starting main Job Driver #{} starting at {}.......", + testTitleName, Time.formatTime(System.currentTimeMillis())); + SpillCallBackPathsFinder spillInjector = + (SpillCallBackPathsFinder) IntermediateEncryptedStream + .setSpillCBInjector(new SpillCallBackPathsFinder()); + StringBuilder testSummary = + new StringBuilder(String.format("%n ===== test %s summary ======", + testTitleName)); + try { + long startTime = Time.monotonicNow(); + testSummary.append(String.format("%nJob %s started at %s", + testTitleName, Time.formatTime(System.currentTimeMillis()))); + Job job = runWordCountJob(testTitleName, jobOutputPath, config, + numMappers, numReducers); + Assert.assertTrue(job.isSuccessful()); + long endTime = Time.monotonicNow(); + testSummary.append(String.format("%nJob %s ended at %s", + job.getJobName(), Time.formatTime(System.currentTimeMillis()))); + testSummary.append(String.format("%n\tThe job took %.3f seconds", + (1.0 * (endTime - startTime)) / 1000)); + FileStatus[] fileStatusArr = + fs.listStatus(jobOutputPath, + new Utils.OutputFileUtils.OutputFilesFilter()); + for (FileStatus fStatus : fileStatusArr) { + long fileSize = fStatus.getLen(); + testSummary.append( + String.format("%n\tOutput file %s: %d", + fStatus.getPath(), fileSize)); + } + // Validate the checksum of the output. + Assert.assertTrue(validateJobOutput()); + // Check intermediate files and spilling. + long spilledRecords = + job.getCounters().findCounter(TaskCounter.SPILLED_RECORDS).getValue(); + Assert.assertTrue("Spill records must be greater than 0", + spilledRecords > 0); + Assert.assertFalse("The encrypted spilled files should not be empty.", + spillInjector.getEncryptedSpilledFiles().isEmpty()); + Assert.assertTrue("Invalid access to spill file positions", + spillInjector.getInvalidSpillEntries().isEmpty()); + } finally { + testSummary.append(spillInjector.getSpilledFileReport()); + LOG.info(testSummary.toString()); + IntermediateEncryptedStream.resetSpillCBInjector(); + } + } + + /** + * A callable implementation that generates a portion of the + * TOTAL_MBS_DEFAULT into {@link #inputBufferedWriter}. + */ + static class InputGeneratorTask implements Callable { + @Override + public Long call() throws Exception { + long bytesWritten = 0; + final ThreadLocalRandom rand = ThreadLocalRandom.current(); + final long totalBytes = 1024 * 1024 * TOTAL_MBS_DEFAULT; + final long bytesPerTask = totalBytes / INPUT_GEN_NUM_THREADS; + final String newLine = System.lineSeparator(); + final BufferedWriter writer = getTextInputWriter(); + while (bytesWritten < bytesPerTask) { + String sentence = + RandomTextWriter.generateSentenceWithRand(rand, rand.nextInt(5, 20)) + .concat(newLine); + writer.write(sentence); + bytesWritten += sentence.length(); + } + writer.flush(); + LOG.info("Task {} finished. Wrote {} bytes.", + Thread.currentThread().getName(), bytesWritten); + return bytesWritten; + } + } + + /** + * A Test tokenizer Mapper. 
+ */ + public static class TokenizerMapper + extends Mapper<Object, Text, Text, LongWritable> { + + private final static LongWritable ONE = new LongWritable(1); + private final Text word = new Text(); + + public void map(Object key, Text value, + Context context) throws IOException, InterruptedException { + StringTokenizer itr = new StringTokenizer(value.toString()); + while (itr.hasMoreTokens()) { + word.set(itr.nextToken()); + context.write(word, ONE); + } + } + } + + /** + * A Mapper that reads the output of WordCount passing it to the reducer. + * It is used to combine the output of multiple reducer jobs. + */ + public static class CombinerJobMapper + extends Mapper<Object, Text, Text, LongWritable> { + private final LongWritable sum = new LongWritable(0); + private final Text word = new Text(); + public void map(Object key, Text value, + Context context) throws IOException, InterruptedException { + String[] line = value.toString().split("\\s+"); + sum.set(Long.parseLong(line[1])); + word.set(line[0]); + context.write(word, sum); + } + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java index f4ccc569af963..31b90aa0e506c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java @@ -30,6 +30,7 @@ import org.apache.hadoop.util.ToolRunner; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; +import org.junit.BeforeClass; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +64,11 @@ public class TestMRJobClient extends ClusterMapReduceTestCase { private static final Logger LOG = LoggerFactory.getLogger(TestMRJobClient.class); + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestMRJobClient.class); + } + private Job runJob(Configuration conf) throws Exception { String input = "hello1\nhello2\nhello3\n"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java index 7a917faa59fbb..f51294876dc78 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java @@ -60,7 +60,7 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import com.google.common.collect.HashMultiset; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultiset; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; @@ -1626,22 +1626,15 @@ public void testSplitPlacementForCompressedFiles() throws Exception { */ @Test public void testMissingBlocks() throws Exception { - String namenode = null; - MiniDFSCluster dfs = null; -
FileSystem fileSys = null; - String testName = "testMissingBlocks"; - try { - Configuration conf = new Configuration(); - conf.set("fs.hdfs.impl", MissingBlockFileSystem.class.getName()); - conf.setBoolean("dfs.replication.considerLoad", false); - dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1) - .build(); + final Configuration conf = new Configuration(); + conf.set("fs.hdfs.impl", MissingBlockFileSystem.class.getName()); + conf.setBoolean("dfs.replication.considerLoad", false); + try (MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf) + .racks(rack1).hosts(hosts1).build()) { dfs.waitActive(); - namenode = (dfs.getFileSystem()).getUri().getHost() + ":" + - (dfs.getFileSystem()).getUri().getPort(); - - fileSys = dfs.getFileSystem(); + final FileSystem fileSys = + MissingBlockFileSystem.newInstance(dfs.getURI(), conf); if (!fileSys.mkdirs(inDir)) { throw new IOException("Mkdirs failed to create " + inDir.toString()); } @@ -1673,11 +1666,6 @@ public void testMissingBlocks() throws Exception { assertEquals(0, fileSplit.getOffset(1)); assertEquals(BLOCKSIZE, fileSplit.getLength(1)); assertEquals(hosts1[0], fileSplit.getLocations()[0]); - - } finally { - if (dfs != null) { - dfs.shutdown(); - } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java index d8c4b2a384117..6115c590d5fd6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java @@ -37,7 +37,6 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest; -import org.apache.hadoop.mapreduce.v2.hs.HistoryClientService; import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService; import org.apache.hadoop.mapreduce.v2.hs.JHSDelegationTokenSecretManager; import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer; @@ -92,22 +91,10 @@ protected void doSecureLogin(Configuration conf) throws IOException { @Override protected JHSDelegationTokenSecretManager createJHSSecretManager( Configuration conf, HistoryServerStateStoreService store) { - return new JHSDelegationTokenSecretManager(initialInterval, + return new JHSDelegationTokenSecretManager(initialInterval, maxLifetime, renewInterval, 3600000, store); } - - @Override - protected HistoryClientService createHistoryClientService() { - return new HistoryClientService(historyContext, - this.jhsDTSecretManager) { - @Override - protected void initializeWebApp(Configuration conf) { - // Don't need it, skip.; - } - }; - } }; -// final JobHistoryServer jobHistoryServer = jhServer; jobHistoryServer.init(conf); jobHistoryServer.start(); final MRClientProtocol hsService = jobHistoryServer.getClientService() diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java index d870d25b9af75..ed80f658f6d96 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/ssl/TestEncryptedShuffle.java @@ -19,7 +19,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -31,58 +30,55 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.After; -import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.Assert; -import java.io.BufferedReader; import java.io.File; -import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.Writer; -import java.net.URL; public class TestEncryptedShuffle { - private static final String BASEDIR = - System.getProperty("test.build.dir", "target/test-dir") + "/" + - TestEncryptedShuffle.class.getSimpleName(); - - private String classpathDir; + private static File testRootDir; @BeforeClass public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); + testRootDir = + GenericTestUtils.setupTestRootDir(TestEncryptedShuffle.class); } @Before public void createCustomYarnClasspath() throws Exception { classpathDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class); new File(classpathDir, "core-site.xml").delete(); + dfsFolder = new File(testRootDir, String.format("dfs-%d", + Time.monotonicNow())); } @After public void cleanUpMiniClusterSpecialConfig() throws Exception { new File(classpathDir, "core-site.xml").delete(); - String keystoresDir = new File(BASEDIR).getAbsolutePath(); + String keystoresDir = testRootDir.getAbsolutePath(); KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, classpathDir); } + private String classpathDir; private MiniDFSCluster dfsCluster = null; private MiniMRClientCluster mrCluster = null; + private File dfsFolder; private void startCluster(Configuration conf) throws Exception { if (System.getProperty("hadoop.log.dir") == null) { - System.setProperty("hadoop.log.dir", "target/test-dir"); + System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath()); } conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions", "true"); @@ -92,7 +88,7 @@ private void startCluster(Configuration conf) throws Exception { YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) + File.pathSeparator + classpathDir; conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp); - dfsCluster = new MiniDFSCluster.Builder(conf).build(); + dfsCluster = new MiniDFSCluster.Builder(conf, dfsFolder).build(); FileSystem fileSystem = dfsCluster.getFileSystem(); fileSystem.mkdirs(new Path("/tmp")); fileSystem.mkdirs(new Path("/user")); @@ 
-129,7 +125,7 @@ private void encryptedShuffleWithCerts(boolean useClientCerts) throws Exception { try { Configuration conf = new Configuration(); - String keystoresDir = new File(BASEDIR).getAbsolutePath(); + String keystoresDir = testRootDir.getAbsolutePath(); String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index 299383d76bf8a..aa85ab90d2cf5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -74,6 +74,7 @@ public MiniMRYarnCluster(String testName) { this(testName, 1); } + @SuppressWarnings("deprecation") public MiniMRYarnCluster(String testName, int noOfNMs) { this(testName, noOfNMs, false); } @@ -84,6 +85,10 @@ public MiniMRYarnCluster(String testName, int noOfNMs, boolean enableAHS) { addService(historyServerWrapper); } + public static String copyAppJarIntoTestDir(String testSubdir) { + return JarFinder.getJar(LocalContainerLauncher.class, testSubdir); + } + public static String getResolvedMRHistoryWebAppURLWithoutScheme( Configuration conf, boolean isSSLEnabled) { InetSocketAddress address = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java index 9fa8828771737..fa0c7d697641d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java @@ -35,6 +35,7 @@ import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.v2.api.records.JobId; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -43,6 +44,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.Assume.assumeFalse; + public class TestMRJobsWithProfiler { private static final Logger LOG = @@ -107,6 +110,8 @@ public static void tearDown() { @Test (timeout = 150000) public void testDefaultProfiler() throws Exception { + assumeFalse("The hprof agent has been removed since Java 9. 
Skipping.", + Shell.isJavaVersionAtLeast(9)); LOG.info("Starting testDefaultProfiler"); testProfilerInternal(true); } @@ -132,13 +137,21 @@ private void testProfilerInternal(boolean useDefault) throws Exception { sleepConf.setProfileTaskRange(false, String.valueOf(PROFILED_TASK_ID)); if (!useDefault) { - // use hprof for map to profile.out - sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS, - "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n," - + "file=%s"); + if (Shell.isJavaVersionAtLeast(9)) { + // use JDK Flight Recorder + sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS, + "-XX:StartFlightRecording=dumponexit=true,filename=%s"); + sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, + "-XX:StartFlightRecording=dumponexit=true,filename=%s"); + } else { + // use hprof for map to profile.out + sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS, + "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n," + + "file=%s"); - // use Xprof for reduce to stdout - sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof"); + // use Xprof for reduce to stdout + sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof"); + } } sleepJob.setConf(sleepConf); @@ -211,6 +224,11 @@ private void testProfilerInternal(boolean useDefault) throws Exception { Assert.assertEquals(4, taLogDirs.size()); // all 4 attempts found + // Skip checking the contents because the JFR dumps binary files + if (Shell.isJavaVersionAtLeast(9)) { + return; + } + for (Map.Entry dirEntry : taLogDirs.entrySet()) { final TaskAttemptID tid = dirEntry.getKey(); final Path profilePath = new Path(dirEntry.getValue(), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecution.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecution.java index fe21f0752430e..8527dc3c9ea32 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecution.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecution.java @@ -192,6 +192,46 @@ public void reduce(Text key, Iterable values, } } + public static class FailOnceMapper extends + Mapper { + + public void map(Object key, Text value, Context context) + throws IOException, InterruptedException { + TaskAttemptID taid = context.getTaskAttemptID(); + try{ + Thread.sleep(2000); + } catch(InterruptedException ie) { + // Ignore + } + // Fail mapper only for first attempt + if (taid.getId() == 0) { + throw new RuntimeException("Failing this mapper"); + } + + context.write(value, new IntWritable(1)); + } + } + + public static class FailOnceReducer extends + Reducer { + + public void reduce(Text key, Iterable values, + Context context) throws IOException, InterruptedException { + TaskAttemptID taid = context.getTaskAttemptID(); + try{ + Thread.sleep(2000); + } catch(InterruptedException ie) { + // Ignore + } + // Fail reduce only for first attempt + if (taid.getId() == 0) { + throw new RuntimeException("Failing this reducer"); + } + context.write(key, new IntWritable(0)); + } + } + + @Test public void testSpeculativeExecution() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { @@ -218,6 +258,30 @@ public void testSpeculativeExecution() throws 
Exception { Assert.assertEquals(0, counters.findCounter(JobCounter.NUM_FAILED_MAPS) .getValue()); + + /*------------------------------------------------------------------ + * Test that Map/Red does not speculate if MAP_SPECULATIVE and + * REDUCE_SPECULATIVE are both false. When map tasks fail once and time out, + * we shouldn't launch two simultaneous attempts. MAPREDUCE-7278 + * ----------------------------------------------------------------- + */ + job = runNonSpecFailOnceTest(); + + succeeded = job.waitForCompletion(true); + Assert.assertTrue(succeeded); + Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState()); + counters = job.getCounters(); + // We will have 4 total since 2 map tasks fail and relaunch attempt once + Assert.assertEquals(4, + counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); + Assert.assertEquals(4, + counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); + // Ensure no maps or reduces killed due to accidental speculation + Assert.assertEquals(0, + counters.findCounter(JobCounter.NUM_KILLED_MAPS).getValue()); + Assert.assertEquals(0, + counters.findCounter(JobCounter.NUM_KILLED_REDUCES).getValue()); + /*---------------------------------------------------------------------- * Test that Mapper speculates if MAP_SPECULATIVE is true and * REDUCE_SPECULATIVE is false. @@ -295,7 +359,48 @@ private Job runSpecTest(boolean mapspec, boolean redspec) // Delete output directory if it exists. try { - localFs.delete(TEST_OUT_DIR,true); + localFs.delete(TEST_OUT_DIR, true); + } catch (IOException e) { + // ignore + } + + // Creates the Job Configuration + job.addFileToClassPath(APP_JAR); // The AppMaster jar itself. + job.setMaxMapAttempts(2); + + job.submit(); + + return job; + } + + private Job runNonSpecFailOnceTest() + throws IOException, ClassNotFoundException, InterruptedException { + + Path first = createTempFile("specexec_map_input1", "a\nz"); + Path secnd = createTempFile("specexec_map_input2", "a\nz"); + + Configuration conf = mrCluster.getConfig(); + conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); + conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false); + // Prevent blacklisting since tasks fail once + conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, false); + // Setting small task exit timeout values reproduces MAPREDUCE-7278 + conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 20); + conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10); + Job job = Job.getInstance(conf); + job.setJarByClass(TestSpeculativeExecution.class); + job.setMapperClass(FailOnceMapper.class); + job.setReducerClass(FailOnceReducer.class); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(IntWritable.class); + job.setNumReduceTasks(2); + FileInputFormat.setInputPaths(job, first); + FileInputFormat.addInputPath(job, secnd); + FileOutputFormat.setOutputPath(job, TEST_OUT_DIR); + + // Delete output directory if it exists. 
+ try { + localFs.delete(TEST_OUT_DIR, true); } catch (IOException e) { // ignore } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml index f36323eeb9255..8dc8089cb161d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-nativetask - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce NativeTask @@ -71,6 +71,11 @@ assertj-core test + + org.lz4 + lz4-java + test + @@ -97,6 +102,8 @@ src/main/native/testData/* + **/lz4.h + **/lz4.c @@ -156,9 +163,9 @@ - - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java index b4755fd10c6ff..4ec1f7a1c561c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.RawComparator; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java index 8d71ad4d6fdaf..311ee223b9c76 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java @@ -20,7 +20,7 @@ import java.io.IOException; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputBuffer; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java index 3d8f78b66a73b..da09f59591bee 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.mapred.nativetask.NativeDataTarget; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * DataOutputStream implementation which buffers data in a fixed-size diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/BytesUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/BytesUtil.java index 16f691919dba4..8d2ca323f9e16 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/BytesUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/BytesUtil.java @@ -18,8 +18,8 @@ package org.apache.hadoop.mapred.nativetask.util; -import com.google.common.primitives.Ints; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.apache.hadoop.classification.InterfaceAudience; @InterfaceAudience.Private diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java index 7870ac7e08708..5f7be806ed954 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.Map; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java index 03df01663c171..af2c496eb6c64 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.mapred.nativetask.util; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; @InterfaceAudience.Private diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/lz4/lz4.c similarity index 100% rename from hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/lz4/lz4.c diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/lz4/lz4.h similarity index 100% rename from hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/lz4/lz4.h diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java index 802259fe30adf..1fe8359a4ad5a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java @@ -21,7 +21,7 @@ import java.util.concurrent.TimeUnit; import java.util.zip.CRC32; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java index 9ea6c5718ed3d..cee7675a4047f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVTest.java @@ -41,8 +41,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @RunWith(Parameterized.class) public class KVTest { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java index 4df48fc867eff..9b46dfa6243bd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java @@ -19,9 +19,9 @@ import java.util.Random; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Ints; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.ByteWritable; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestBytesUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestBytesUtil.java index 5bafa43c62f9c..5bae67b471eea 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestBytesUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestBytesUtil.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.mapred.nativetask.utils; -import com.google.common.primitives.Ints; -import com.google.common.primitives.Longs; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml index aec04dbffb963..ac36e48d74312 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml @@ -19,11 +19,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-shuffle - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Shuffle diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java index e9f0f34c69e95..9e8fc327b5200 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.io.RandomAccessFile; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.io.ReadaheadPool; import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest; import org.apache.hadoop.io.nativeio.NativeIO; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java index 4b2c8cbf20afd..deb37ea632f37 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java @@ -35,7 +35,7 @@ import org.jboss.netty.channel.DefaultFileRegion; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class FadvisedFileRegion extends DefaultFileRegion { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index ffc65641a5cb2..1d5b475ea7a4f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -133,13 +133,13 @@ import org.eclipse.jetty.http.HttpHeader; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.cache.RemovalListener; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.thirdparty.protobuf.ByteString; public class ShuffleHandler extends AuxiliaryService { @@ -153,6 +153,18 @@ public class ShuffleHandler extends AuxiliaryService { public static final String SHUFFLE_READAHEAD_BYTES = "mapreduce.shuffle.readahead.bytes"; public static final int DEFAULT_SHUFFLE_READAHEAD_BYTES = 4 * 1024 * 1024; + + public static final String MAX_WEIGHT = + "mapreduce.shuffle.pathcache.max-weight"; + public static final int DEFAULT_MAX_WEIGHT = 10 * 1024 * 1024; + + public static final String EXPIRE_AFTER_ACCESS_MINUTES = + "mapreduce.shuffle.pathcache.expire-after-access-minutes"; + public static final int DEFAULT_EXPIRE_AFTER_ACCESS_MINUTES = 5; + + public static final String CONCURRENCY_LEVEL = + "mapreduce.shuffle.pathcache.concurrency-level"; + public static final int DEFAULT_CONCURRENCY_LEVEL = 16; // pattern to identify errors related to the client closing the socket early // idea borrowed from Netty SslHandler @@ -837,18 +849,6 @@ public ChannelPipeline getPipeline() throws Exception { } class Shuffle extends SimpleChannelUpstreamHandler { - private static final String MAX_WEIGHT = - 
"mapreduce.shuffle.pathcache.max-weight"; - private static final int DEFAULT_MAX_WEIGHT = 10 * 1024 * 1024; - - private static final String EXPIRE_AFTER_ACCESS_MINUTES = - "mapreduce.shuffle.pathcache.expire-after-access-minutes"; - private static final int DEFAULT_EXPIRE_AFTER_ACCESS_MINUTES = 5; - - private static final String CONCURRENCY_LEVEL = - "mapreduce.shuffle.pathcache.concurrency-level"; - private static final int DEFAULT_CONCURRENCY_LEVEL = 16; - private final IndexCache indexCache; private final LoadingCache pathCache; @@ -1170,8 +1170,13 @@ protected void populateHeaders(List mapIds, String jobId, StringBuilder sb = new StringBuilder("shuffle for "); sb.append(jobId).append(" reducer ").append(reduce); sb.append(" length ").append(contentLength); - sb.append(" mappers: ").append(mapIds); - AUDITLOG.debug(sb.toString()); + if (AUDITLOG.isTraceEnabled()) { + // For trace level logging, append the list of mappers + sb.append(" mappers: ").append(mapIds); + AUDITLOG.trace(sb.toString()); + } else { + AUDITLOG.debug(sb.toString()); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml index 6dd2fbe67cc8f..6d802e6783861 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml @@ -18,11 +18,11 @@ hadoop-mapreduce-client org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-mapreduce-client-uploader - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Uploader diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java index 52ee38aaa935a..0676e12706bd7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapred.uploader; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.Options; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java index f55cdefc70702..40ef8f6c38490 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/test/java/org/apache/hadoop/mapred/uploader/TestFrameworkUploader.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapred.uploader; -import com.google.common.collect.Lists; +import 
org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.apache.commons.io.FileUtils; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index fdea2d3e6bcc7..04159c4aafe07 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-mapreduce-client - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Client pom @@ -128,8 +128,8 @@ provided - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava provided @@ -157,10 +157,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${mr.basedir}/dev-support/findbugs-exclude.xml Max @@ -206,7 +205,7 @@ - javadoc + javadoc-no-fork prepare-package diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index d128e4b5242f6..040ff46f43948 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-mapreduce-examples - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Examples Apache Hadoop MapReduce Examples jar @@ -108,8 +108,8 @@ provided - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava provided @@ -138,10 +138,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${mr.examples.basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java index 2bda89d36e073..f0f9a34f55444 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java @@ -53,7 +53,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * A map/reduce program that uses Bailey-Borwein-Plouffe to compute exact diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java index d1580ce18960a..26a3009918674 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java @@ -37,7 +37,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; public class WordMean extends Configured implements Tool { diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java index 630c6ab894b82..9acf62bd17e24 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java @@ -39,7 +39,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; public class WordMedian extends Configured implements Tool { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java index 2ac6400e44854..2a7733b875c09 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java @@ -37,7 +37,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; public class WordStandardDeviation extends Configured implements Tool { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java index d4fe6dcdb461b..e2d034193beb9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java @@ -33,7 +33,7 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.*; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Launch a distributed pentomino solver. 
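The import swaps in the hunks above (Charsets in WordMean, WordMedian, WordStandardDeviation, DistributedPentomino, and the other examples) all follow the same mechanical pattern: the Guava API is unchanged, only its package moves under the relocated hadoop-thirdparty prefix. A minimal sketch of a consumer class after such a change; the class and method names here are illustrative, not taken from the patch:

// Before the change this class would import com.google.common.base.Charsets;
// afterwards only the package prefix differs -- the Guava API itself is identical.
import java.nio.charset.Charset;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;

public class ShadedGuavaExample {
  public static byte[] toUtf8(String s) {
    // Charsets.UTF_8 resolves to the relocated (shaded) Guava class, so
    // downstream applications can bring their own Guava version without
    // classpath conflicts against Hadoop's copy.
    Charset utf8 = Charsets.UTF_8;
    return s.getBytes(utf8);
  }
}
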
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java index a835b6a6c9164..aa2df72af2805 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java @@ -21,7 +21,7 @@ import java.io.*; import java.util.*; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * This class uses the dancing links algorithm from Knuth to solve sudoku diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java index 60b36387b8f72..16273fd0baaa4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java @@ -34,7 +34,7 @@ import org.apache.hadoop.examples.pi.math.Bellard; import org.apache.hadoop.examples.pi.math.Bellard.Parameter; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** A class for parsing outputs */ public final class Parser { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java index e74c09194ffcc..ddbbf6334379d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java @@ -46,7 +46,7 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.concurrent.HadoopExecutors; /** Utility methods */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java index 3ad8f29d9ae3b..8d7e7a0a9713d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java @@ -44,7 +44,7 @@ long and(long mask) { return d0 & mask; } - /** Shift right operation (<<). */ + /** Shift right operation (>>). 
*/ long shiftRight(int n) { return (d1 << (BITS_PER_LONG - n)) + (d0 >>> n); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java index 7a9e44b0353de..7998d4a8f6133 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; class TeraScheduler { private static final Logger LOG = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py deleted file mode 100644 index 70725f833ea94..0000000000000 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/job_history_summary.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re -import sys - -pat = re.compile('(?P[^=]+)="(?P[^"]*)" *') -counterPat = re.compile('(?P[^:]+):(?P[^,]*),?') - -def parse(tail): - result = {} - for n,v in re.findall(pat, tail): - result[n] = v - return result - -mapStartTime = {} -mapEndTime = {} -reduceStartTime = {} -reduceShuffleTime = {} -reduceSortTime = {} -reduceEndTime = {} -reduceBytes = {} - -for line in sys.stdin: - words = line.split(" ",1) - event = words[0] - attrs = parse(words[1]) - if event == 'MapAttempt': - if attrs.has_key("START_TIME"): - mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"])/1000 - elif attrs.has_key("FINISH_TIME"): - mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000 - elif event == 'ReduceAttempt': - if attrs.has_key("START_TIME"): - reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000 - elif attrs.has_key("FINISH_TIME"): - reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"])/1000 - reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"])/1000 - reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"])/1000 - elif event == 'Task': - if attrs["TASK_TYPE"] == "REDUCE" and attrs.has_key("COUNTERS"): - for n,v in re.findall(counterPat, attrs["COUNTERS"]): - if n == "File Systems.HDFS bytes written": - reduceBytes[attrs["TASKID"]] = int(v) - -runningMaps = {} -shufflingReduces = {} -sortingReduces = {} -runningReduces = {} -startTime = min(reduce(min, mapStartTime.values()), - reduce(min, reduceStartTime.values())) -endTime = max(reduce(max, mapEndTime.values()), - reduce(max, reduceEndTime.values())) - -reduces = reduceBytes.keys() -reduces.sort() - -print "Name reduce-output-bytes shuffle-finish reduce-finish" -for r in reduces: - print r, reduceBytes[r], reduceShuffleTime[r] - startTime, - print reduceEndTime[r] - startTime - -print - -for t in range(startTime, endTime): - runningMaps[t] = 0 - shufflingReduces[t] = 0 - sortingReduces[t] = 0 - runningReduces[t] = 0 - -for map in mapStartTime.keys(): - for t in range(mapStartTime[map], mapEndTime[map]): - runningMaps[t] += 1 -for reduce in reduceStartTime.keys(): - for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]): - shufflingReduces[t] += 1 - for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]): - sortingReduces[t] += 1 - for t in range(reduceSortTime[reduce], reduceEndTime[reduce]): - runningReduces[t] += 1 - -print "time maps shuffle merge reduce" -for t in range(startTime, endTime): - print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t], - print runningReduces[t] diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java index d6f284e50f768..232c53f4d47cc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java @@ -53,32 +53,28 @@ public void testMultiplication() { verifyMultiplication(max, max); } - static void verifyRightShift(long a, long b) { + @Test + public void testRightShift() { + for(int i = 0; i < 1000; i++) { + final long a = nextPositiveLong(); + final long b = nextPositiveLong(); + verifyRightShift(a, b); + } + } + + private static void verifyRightShift(long a, long b) { final LongLong ll = new LongLong().set(a, b); final BigInteger bi = 
ll.toBigInteger(); - for (int i = 0; i < LongLong.SIZE >> 1; i++) { - final long result = ll.shiftRight(i) & MASK; - final long expected = bi.shiftRight(i).longValue() & MASK; - final String s = String.format( - "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, - b); - Assert.assertEquals(s, expected, result); - } - final String s = String.format( "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b); - //System.out.println(s); Assert.assertEquals(s, bi, ll.toBigInteger()); - } - @Test - public void testRightShift() { - for(int i = 0; i < 1000; i++) { - final long a = nextPositiveLong(); - final long b = nextPositiveLong(); - verifyMultiplication(a, b); + for (int i = 0; i < LongLong.SIZE >> 1; i++) { + final long result = ll.shiftRight(i) & MASK; + final long expected = bi.shiftRight(i).longValue() & MASK; + Assert.assertEquals(s, expected, result); } } } diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 67b3d95d8a633..643a767ac50ef 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-mapreduce - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT pom Apache Hadoop MapReduce https://hadoop.apache.org/ @@ -178,10 +178,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${mr.basedir}/dev-support/findbugs-exclude.xml Max @@ -299,12 +298,9 @@ - org.codehaus.mojo - findbugs-maven-plugin - - 2.3.2 + com.github.spotbugs + spotbugs-maven-plugin - true true diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml index faf8494100949..674bbb10a4083 100644 --- a/hadoop-maven-plugins/pom.xml +++ b/hadoop-maven-plugins/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-maven-plugins @@ -100,6 +100,10 @@ + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java index 5bf84c21fea7b..5d99d28aa802d 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/resourcegz/ResourceGzMojo.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.maven.plugin.resourcegz; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.io.IOUtils; import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml index 4f6b33e1456d2..7d64566244d25 100644 --- a/hadoop-minicluster/pom.xml +++ b/hadoop-minicluster/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-minicluster - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT jar Apache Hadoop Mini-Cluster diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 8b3693bee7441..3f4a678c5e1fc 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Project Dist POM Apache Hadoop Project 
Dist POM pom @@ -40,7 +40,6 @@ UNDEF false - false false false @@ -90,11 +89,10 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml - true 2048 @@ -136,7 +134,7 @@ false - 3.2.1 + 3.2.2 -unstable @@ -158,7 +156,7 @@ - javadoc + javadoc-no-fork prepare-package @@ -341,7 +339,6 @@ --openssllib=${openssl.lib} --opensslbinbundle=${bundle.openssl.in.bin} --openssllibbundle=${bundle.openssl} - --snappybinbundle=${bundle.snappy.in.bin} --snappylib=${snappy.lib} --snappylibbundle=${bundle.snappy} --zstdbinbundle=${bundle.zstd.in.bin} diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index f6c4300aead8c..eb42719cfe47c 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -20,10 +20,10 @@ org.apache.hadoop hadoop-main - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Project POM Apache Hadoop Project POM pom @@ -31,18 +31,18 @@ - 2020 + 2021 false true true - 9.4.20.v20190813 + 9.4.35.v20201120 _ _ - 4 + 2 @@ -71,15 +71,15 @@ 1.9.13 - 2.10.3 - 2.10.3 + 2.10.5 + 2.10.5.1 - 4.5.6 - 4.4.10 + 4.5.13 + 4.4.13 - 1.7.25 + 1.7.30 1.2.17 @@ -91,21 +91,23 @@ 3.7.1 ${env.HADOOP_PROTOC_PATH} - 1.0.0 + 1.1.0-SNAPSHOT + ${hadoop-thirdparty.version} + ${hadoop-thirdparty.version} org.apache.hadoop.thirdparty ${hadoop-thirdparty-shaded-prefix}.protobuf + ${hadoop-thirdparty-shaded-prefix}.com.google.common 3.5.6 4.2.0 3.0.5 - 3.1.0-RC1 2.1.7 27.0-jre 4.0 2.9.9 - 1.60 + 1.68 2.0.0-M21 @@ -140,7 +142,9 @@ 4.1.0-incubating 3.2.4 3.10.6.Final - 4.1.45.Final + 4.1.50.Final + 1.1.8.2 + 1.7.1 0.5.1 @@ -167,7 +171,7 @@ ${maven-surefire-plugin.version} ${maven-surefire-plugin.version} - 2.5 + 3.1.0 3.1 2.5.1 2.6 @@ -181,9 +185,9 @@ 1.3.1 1.0-beta-1 900 - 1.11.563 + 1.11.901 2.3.4 - 1.6 + 1.11.2 2.1 0.7 1.5.1 @@ -194,7 +198,7 @@ ${hadoop.version} 1.5.4 - 1.16 + 1.26 1.4.8 2.0.2 4.12 @@ -206,6 +210,7 @@ 1.5.6 7.7.0 1.0.7.Final + 5.3.0 @@ -231,6 +236,11 @@ hadoop-shaded-protobuf_3_7 ${hadoop-thirdparty-protobuf.version} + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + ${hadoop-thirdparty-guava.version} + org.apache.hadoop hadoop-assemblies @@ -642,6 +652,12 @@ ${hadoop.version} + + org.apache.hadoop + hadoop-cos + ${hadoop.version} + + org.apache.hadoop hadoop-kms @@ -1089,12 +1105,12 @@ org.codehaus.woodstox stax2-api - 3.1.4 + 4.2.1 com.fasterxml.woodstox woodstox-core - 5.0.3 + ${woodstox.version} org.codehaus.jackson @@ -1159,12 +1175,20 @@ org.mock-server mockserver-netty - 3.9.2 + 5.10 javax.servlet javax.servlet-api + + io.swagger.parser.v3 + swagger-parser + + + org.hamcrest + hamcrest-core + @@ -1180,7 +1204,7 @@ org.apache.ant ant - 1.8.1 + 1.10.9 com.google.re2j @@ -1305,6 +1329,18 @@ jline jline + + io.netty + netty-all + + + io.netty + netty-handler + + + io.netty + netty-transport-native-epoll + @@ -1384,7 +1420,7 @@ com.microsoft.azure azure-storage - 7.0.0 + 7.0.1 @@ -1421,6 +1457,12 @@ + + com.qcloud + cos_api-bundle + 5.6.19 + + org.apache.curator curator-recipes @@ -1672,9 +1714,9 @@ 1.0.13 - javax.activation - javax.activation-api - 1.2.0 + jakarta.activation + jakarta.activation-api + 1.2.1 javax.annotation @@ -1686,6 +1728,16 @@ jna ${jna.version} + + org.xerial.snappy + snappy-java + ${snappy-java.version} + + + org.lz4 + lz4-java + ${lz4-java.version} + @@ -1775,6 +1827,7 @@ maven-javadoc-plugin ${maven-javadoc-plugin.version} + all -Xmaxwarns 10000 @@ -1785,18 +1838,6 @@ maven-war-plugin ${maven-war-plugin.version} - - org.codehaus.mojo 
- findbugs-maven-plugin - ${findbugs.version} - - - com.github.spotbugs - spotbugs - ${spotbugs.version} - - - org.codehaus.mojo make-maven-plugin @@ -1984,6 +2025,27 @@ + + replace-guava + process-sources + + replace + + + false + ${basedir} + + src/main/java/**/*.java + src/test/java/**/*.java + + + + ([^\.])com.google.common + $1${hadoop-thirdparty-shaded-guava-prefix} + + + + @@ -2009,10 +2071,6 @@ - - org.codehaus.mojo - findbugs-maven-plugin - org.apache.maven.plugins maven-antrun-plugin @@ -2093,7 +2151,7 @@ - true + false @@ -2130,6 +2188,10 @@ + + com.google.code.maven-replacer-plugin + replacer + @@ -2169,7 +2231,6 @@ file:/dev/urandom - true true true @@ -2181,7 +2242,6 @@ - ${env.PATH};${hadoop.common.build.dir}/bin;${snappy.lib} ${env.PATH};${hadoop.common.build.dir}/bin;${zstd.lib} ${env.PATH};${hadoop.common.build.dir}/bin;${openssl.lib} ${env.PATH};${hadoop.common.build.dir}/bin;${isal.lib} diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index b87ffea2b81ab..e2d149da2eb94 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -85,6 +85,7 @@ + @@ -218,6 +219,8 @@ + + diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml index 6bd540840a4c4..b169e1cfc172d 100644 --- a/hadoop-tools/hadoop-aliyun/pom.xml +++ b/hadoop-tools/hadoop-aliyun/pom.xml @@ -18,7 +18,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-aliyun @@ -58,10 +58,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java index a48fde6eaa7b2..d9a3f5830dab9 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java @@ -19,10 +19,10 @@ package org.apache.hadoop.fs.aliyun.oss; import com.aliyun.oss.model.PartETag; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java index be1a8edb0da49..759484e4239fa 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java @@ -27,8 +27,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; -import 
com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -78,8 +77,8 @@ public class AliyunOSSFileSystem extends FileSystem { private int maxKeys; private int maxReadAheadPartNumber; private int maxConcurrentCopyTasksPerDir; - private ListeningExecutorService boundedThreadPool; - private ListeningExecutorService boundedCopyThreadPool; + private ExecutorService boundedThreadPool; + private ExecutorService boundedCopyThreadPool; private static final PathFilter DEFAULT_FILTER = new PathFilter() { @Override diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java index c38f6fcaa229e..9a89a086df00d 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java @@ -24,7 +24,7 @@ import java.util.Queue; import java.util.concurrent.ExecutorService; -import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java index 3e02d7f2f6f0e..a6d59c2d0c4ec 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java @@ -23,7 +23,7 @@ import java.net.URI; import com.aliyun.oss.common.auth.CredentialsProvider; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.LocalDirAllocator; diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml index d8b7150a839ef..a70b7b7842148 100644 --- a/hadoop-tools/hadoop-archive-logs/pom.xml +++ b/hadoop-tools/hadoop-archive-logs/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-archive-logs - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Archive Logs Apache Hadoop Archive Logs jar @@ -86,8 +86,8 @@ provided - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava provided @@ -194,10 +194,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java index 12ce47111a2f7..ce09b76a6c8cf 100644 --- a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java +++ 
b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java index 381cc4b204884..29aadcd3eeb80 100644 --- a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java +++ b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml index 47c1251046af5..2ad6399c8fd2b 100644 --- a/hadoop-tools/hadoop-archives/pom.xml +++ b/hadoop-tools/hadoop-archives/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-archives - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Archives Apache Hadoop Archives jar diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java index da5ad6cd19793..471f35494497e 100644 --- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java @@ -72,7 +72,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * a archive creation utility. diff --git a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml index b51053603fa7b..9d598be856a16 100644 --- a/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml +++ b/hadoop-tools/hadoop-aws/dev-support/findbugs-exclude.xml @@ -74,4 +74,20 @@ + + + + + + + + + + + + diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 21c91dd5ddfda..41220b839a189 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-aws - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Amazon Web Services support This module contains code to support integration with Amazon Web Services. 
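The import rewrites in the aliyun and archive-logs modules above all follow the same pattern: Guava is now consumed through the hadoop-thirdparty relocation instead of com.google.guava directly, and hadoop-project adds a com.google.code.maven-replacer-plugin execution that applies the same rewrite over src/main/java and src/test/java. A minimal sketch of what calling code looks like after the relocation (the class and method below are illustrative, not part of this patch):

    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

    /** Illustrative only: shows the relocated Guava package prefix in use. */
    public final class ShadedGuavaExample {

      private ShadedGuavaExample() {
      }

      @VisibleForTesting
      static String requireBucket(String bucket) {
        // Same Guava API, different package: references to com.google.common
        // are rewritten to the org.apache.hadoop.thirdparty prefix at build time.
        Preconditions.checkArgument(bucket != null && !bucket.isEmpty(),
            "null or empty bucket");
        return bucket;
      }
    }

Only the package prefix changes; the Guava APIs themselves (Preconditions, VisibleForTesting, the concurrent utilities) keep their signatures.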
@@ -51,6 +51,10 @@ 200000 + + false + + @@ -122,6 +126,9 @@ ${fs.s3a.scale.test.huge.filesize} ${fs.s3a.scale.test.huge.partitionsize} ${fs.s3a.scale.test.timeout} + + ${fs.s3a.directory.marker.retention} + ${fs.s3a.directory.marker.audit} @@ -162,6 +169,7 @@ ${fs.s3a.s3guard.test.enabled} ${fs.s3a.s3guard.test.authoritative} ${fs.s3a.s3guard.test.implementation} + ${fs.s3a.directory.marker.retention} ${test.integration.timeout} @@ -188,8 +196,12 @@ **/ITestDynamoDBMetadataStoreScale.java **/ITestTerasort*.java + + **/ITestMarkerToolRootOperations.java **/ITestS3GuardDDBRootOperations.java + + **/ITestAggregateIOStatistics.java @@ -214,6 +226,9 @@ ${fs.s3a.s3guard.test.enabled} ${fs.s3a.s3guard.test.implementation} ${fs.s3a.s3guard.test.authoritative} + + ${fs.s3a.directory.marker.retention} + ${fs.s3a.directory.marker.audit} @@ -229,9 +244,15 @@ **/ITestTerasort*.java + + + **/ITestMarkerToolRootOperations.java **/ITestS3AContractRootDir.java **/ITestS3GuardDDBRootOperations.java + + **/ITestAggregateIOStatistics.java @@ -268,6 +289,9 @@ ${fs.s3a.s3guard.test.enabled} ${fs.s3a.s3guard.test.implementation} ${fs.s3a.s3guard.test.authoritative} + + ${fs.s3a.directory.marker.retention} + ${fs.s3a.directory.marker.audit} ${fs.s3a.scale.test.timeout} @@ -331,15 +355,52 @@ + + + keep-markers + + + markers + keep + + + + keep + + + + delete-markers + + + markers + delete + + + + delete + + + + auth-markers + + + markers + authoritative + + + + authoritative + + + - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSClientIOException.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSClientIOException.java index 22afb01340d4b..f31f3c79b355e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSClientIOException.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSClientIOException.java @@ -20,7 +20,7 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.SdkBaseException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java index 542e6f4871cf3..224cdeffe7899 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java @@ -31,8 +31,8 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AnonymousAWSCredentials; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 0ca4aa01a7e5a..67e5c17e56392 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -160,9 +160,34 @@ private Constants() { DEFAULT_SSL_CHANNEL_MODE = DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE; - //use a custom endpoint? + /** + * Endpoint. For v4 signing and/or better performance, + * this should be the specific endpoint of the region + * in which the bucket is hosted. + */ public static final String ENDPOINT = "fs.s3a.endpoint"; + /** + * Default value of s3 endpoint: {@value}. + * It tells the AWS client to work it out by asking the central + * endpoint where the bucket lives; caching that + * value in the client for the life of the process. + *
<p>
    + * Note: previously this constant was defined as + * {@link #CENTRAL_ENDPOINT}, however the actual + * S3A client code used "" as the default when + * {@link #ENDPOINT} was unset. + * As core-default.xml also set the endpoint to "", + * the empty string has long been the real + * default value. + */ + public static final String DEFAULT_ENDPOINT = ""; + + /** + * The central endpoint :{@value}. + */ + public static final String CENTRAL_ENDPOINT = "s3.amazonaws.com"; + //Enable path style access? Overrides default virtual hosting public static final String PATH_STYLE_ACCESS = "fs.s3a.path.style.access"; @@ -543,6 +568,13 @@ private Constants() { @InterfaceStability.Unstable public static final String INPUT_FADV_RANDOM = "random"; + /** + * Gauge name for the input policy : {@value}. + * This references an enum currently exclusive to the S3A stream. + */ + public static final String STREAM_READ_GAUGE_INPUT_POLICY = + "stream_read_gauge_input_policy"; + @InterfaceAudience.Private @InterfaceStability.Unstable public static final String S3_CLIENT_FACTORY_IMPL = @@ -740,9 +772,9 @@ private Constants() { * The warn level if S3Guard is disabled. */ public static final String S3GUARD_DISABLED_WARN_LEVEL - = "org.apache.hadoop.fs.s3a.s3guard.disabled.warn.level"; + = "fs.s3a.s3guard.disabled.warn.level"; public static final String DEFAULT_S3GUARD_DISABLED_WARN_LEVEL = - "INFORM"; + "SILENT"; /** * Inconsistency (visibility delay) injection settings. @@ -947,4 +979,98 @@ private Constants() { * Value: {@value} seconds. */ public static final int THREAD_POOL_SHUTDOWN_DELAY_SECONDS = 30; + + /** + * Policy for directory markers. + * This is a new feature of HADOOP-13230 which addresses + * some scale, performance and permissions issues -but + * at the risk of backwards compatibility. + */ + public static final String DIRECTORY_MARKER_POLICY = + "fs.s3a.directory.marker.retention"; + + /** + * Delete directory markers. This is the backwards compatible option. + * Value: {@value}. + */ + public static final String DIRECTORY_MARKER_POLICY_DELETE = + "delete"; + + /** + * Retain directory markers. + * Value: {@value}. + */ + public static final String DIRECTORY_MARKER_POLICY_KEEP = + "keep"; + + /** + * Retain directory markers in authoritative directory trees only. + * Value: {@value}. + */ + public static final String DIRECTORY_MARKER_POLICY_AUTHORITATIVE = + "authoritative"; + + /** + * Default retention policy: {@value}. + */ + public static final String DEFAULT_DIRECTORY_MARKER_POLICY = + DIRECTORY_MARKER_POLICY_DELETE; + + + /** + * {@code PathCapabilities} probe to verify that an S3A Filesystem + * has the changes needed to safely work with buckets where + * directoy markers have not been deleted. + * Value: {@value}. + */ + public static final String STORE_CAPABILITY_DIRECTORY_MARKER_AWARE + = "fs.s3a.capability.directory.marker.aware"; + + /** + * {@code PathCapabilities} probe to indicate that the filesystem + * keeps directory markers. + * Value: {@value}. + */ + public static final String STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP + = "fs.s3a.capability.directory.marker.policy.keep"; + + /** + * {@code PathCapabilities} probe to indicate that the filesystem + * deletes directory markers. + * Value: {@value}. + */ + public static final String STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE + = "fs.s3a.capability.directory.marker.policy.delete"; + + /** + * {@code PathCapabilities} probe to indicate that the filesystem + * keeps directory markers in authoritative paths only. 
+ * Value: {@value}. + */ + public static final String + STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE = + "fs.s3a.capability.directory.marker.policy.authoritative"; + + /** + * {@code PathCapabilities} probe to indicate that a path + * keeps directory markers. + * Value: {@value}. + */ + public static final String STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP + = "fs.s3a.capability.directory.marker.action.keep"; + + /** + * {@code PathCapabilities} probe to indicate that a path + * deletes directory markers. + * Value: {@value}. + */ + public static final String STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE + = "fs.s3a.capability.directory.marker.action.delete"; + + /** + * To comply with the XAttr rules, all headers of the object retrieved + * through the getXAttr APIs have the prefix: {@value}. + */ + public static final String XA_HEADER_PREFIX = "header."; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java index 276961bf8b7e1..ae50bd1459bcd 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java @@ -22,43 +22,63 @@ import java.net.URI; import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.handlers.RequestHandler2; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.S3ClientOptions; +import com.amazonaws.services.s3.internal.ServiceUtils; +import com.amazonaws.util.AwsHostNameUtils; +import com.amazonaws.util.RuntimeHttpUtils; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING; -import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT; import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT; -import static org.apache.hadoop.fs.s3a.Constants.PATH_STYLE_ACCESS; /** * The default {@link S3ClientFactory} implementation. - * This which calls the AWS SDK to configure and create an - * {@link AmazonS3Client} that communicates with the S3 service. + * This calls the AWS SDK to configure and create an + * {@code AmazonS3Client} that communicates with the S3 service. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class DefaultS3ClientFactory extends Configured implements S3ClientFactory { - protected static final Logger LOG = S3AFileSystem.LOG; + private static final String S3_SERVICE_NAME = "s3"; + /** + * Subclasses refer to this. + */ + protected static final Logger LOG = + LoggerFactory.getLogger(DefaultS3ClientFactory.class); + + /** + * Create the client by preparing the AwsConf configuration + * and then invoking {@code buildAmazonS3Client()}. 
+ */ @Override - public AmazonS3 createS3Client(URI name, - final String bucket, - final AWSCredentialsProvider credentials, - final String userAgentSuffix) throws IOException { + public AmazonS3 createS3Client( + final URI uri, + final S3ClientCreationParameters parameters) throws IOException { Configuration conf = getConf(); final ClientConfiguration awsConf = S3AUtils - .createAwsConf(conf, bucket, Constants.AWS_SERVICE_IDENTIFIER_S3); + .createAwsConf(conf, + uri.getHost(), + Constants.AWS_SERVICE_IDENTIFIER_S3); + // add any headers + parameters.getHeaders().forEach((h, v) -> + awsConf.addHeader(h, v)); // When EXPERIMENTAL_AWS_INTERNAL_THROTTLING is false // throttling is explicitly disabled on the S3 client so that @@ -69,39 +89,78 @@ public AmazonS3 createS3Client(URI name, conf.getBoolean(EXPERIMENTAL_AWS_INTERNAL_THROTTLING, EXPERIMENTAL_AWS_INTERNAL_THROTTLING_DEFAULT)); - if (!StringUtils.isEmpty(userAgentSuffix)) { - awsConf.setUserAgentSuffix(userAgentSuffix); + if (!StringUtils.isEmpty(parameters.getUserAgentSuffix())) { + awsConf.setUserAgentSuffix(parameters.getUserAgentSuffix()); } - return configureAmazonS3Client( - newAmazonS3Client(credentials, awsConf), conf); + + return buildAmazonS3Client( + awsConf, + parameters); } /** - * Wrapper around constructor for {@link AmazonS3} client. - * Override this to provide an extended version of the client - * @param credentials credentials to use + * Use the Builder API to create an AWS S3 client. + *
<p>
    + * This has a more complex endpoint configuration mechanism + * which initially caused problems; the + * {@code withForceGlobalBucketAccessEnabled(true)} + * command is critical here. * @param awsConf AWS configuration - * @return new AmazonS3 client + * @param parameters parameters + * @return new AmazonS3 client */ - protected AmazonS3 newAmazonS3Client( - AWSCredentialsProvider credentials, ClientConfiguration awsConf) { - return new AmazonS3Client(credentials, awsConf); + protected AmazonS3 buildAmazonS3Client( + final ClientConfiguration awsConf, + final S3ClientCreationParameters parameters) { + AmazonS3ClientBuilder b = AmazonS3Client.builder(); + b.withCredentials(parameters.getCredentialSet()); + b.withClientConfiguration(awsConf); + b.withPathStyleAccessEnabled(parameters.isPathStyleAccess()); + + if (parameters.getMetrics() != null) { + b.withMetricsCollector( + new AwsStatisticsCollector(parameters.getMetrics())); + } + if (parameters.getRequestHandlers() != null) { + b.withRequestHandlers( + parameters.getRequestHandlers().toArray(new RequestHandler2[0])); + } + if (parameters.getMonitoringListener() != null) { + b.withMonitoringListener(parameters.getMonitoringListener()); + } + + // endpoint set up is a PITA + AwsClientBuilder.EndpointConfiguration epr + = createEndpointConfiguration(parameters.getEndpoint(), + awsConf); + if (epr != null) { + // an endpoint binding was constructed: use it. + b.withEndpointConfiguration(epr); + } else { + // no idea what the endpoint is, so tell the SDK + // to work it out at the cost of an extra HEAD request + b.withForceGlobalBucketAccessEnabled(true); + } + final AmazonS3 client = b.build(); + return client; } /** - * Configure S3 client from the Hadoop configuration. - * + * Configure classic S3 client. + *
<p>
    * This includes: endpoint, Path Access and possibly other * options. * - * @param conf Hadoop configuration + * @param s3 S3 Client. + * @param endPoint s3 endpoint, may be empty + * @param pathStyleAccess enable path style access? * @return S3 client * @throws IllegalArgumentException if misconfigured */ - private static AmazonS3 configureAmazonS3Client(AmazonS3 s3, - Configuration conf) + protected static AmazonS3 configureAmazonS3Client(AmazonS3 s3, + final String endPoint, + final boolean pathStyleAccess) throws IllegalArgumentException { - String endPoint = conf.getTrimmed(ENDPOINT, ""); if (!endPoint.isEmpty()) { try { s3.setEndpoint(endPoint); @@ -111,31 +170,6 @@ private static AmazonS3 configureAmazonS3Client(AmazonS3 s3, throw new IllegalArgumentException(msg, e); } } - return applyS3ClientOptions(s3, conf); - } - - /** - * Perform any tuning of the {@code S3ClientOptions} settings based on - * the Hadoop configuration. - * This is different from the general AWS configuration creation as - * it is unique to S3 connections. - * - * The {@link Constants#PATH_STYLE_ACCESS} option enables path-style access - * to S3 buckets if configured. By default, the - * behavior is to use virtual hosted-style access with URIs of the form - * {@code http://bucketname.s3.amazonaws.com} - * Enabling path-style access and a - * region-specific endpoint switches the behavior to use URIs of the form - * {@code http://s3-eu-west-1.amazonaws.com/bucketname}. - * It is common to use this when connecting to private S3 servers, as it - * avoids the need to play with DNS entries. - * @param s3 S3 client - * @param conf Hadoop configuration - * @return the S3 client - */ - private static AmazonS3 applyS3ClientOptions(AmazonS3 s3, - Configuration conf) { - final boolean pathStyleAccess = conf.getBoolean(PATH_STYLE_ACCESS, false); if (pathStyleAccess) { LOG.debug("Enabling path style access!"); s3.setS3ClientOptions(S3ClientOptions.builder() @@ -144,4 +178,54 @@ private static AmazonS3 applyS3ClientOptions(AmazonS3 s3, } return s3; } + + /** + * Given an endpoint string, return an endpoint config, or null, if none + * is needed. + *
<p>
    + * This is a pretty painful piece of code. It is trying to replicate + * what AwsClient.setEndpoint() does, because you can't + * call that setter on an AwsClient constructed via + * the builder, and you can't pass a metrics collector + * down except through the builder. + *
<p>
    + * Note also that AWS signing is a mystery which nobody fully + * understands, especially given all problems surface in a + * "400 bad request" response, which, like all security systems, + * provides minimal diagnostics out of fear of leaking + * secrets. + * + * @param endpoint possibly null endpoint. + * @param awsConf config to build the URI from. + * @return a configuration for the S3 client builder. + */ + @VisibleForTesting + public static AwsClientBuilder.EndpointConfiguration + createEndpointConfiguration( + final String endpoint, final ClientConfiguration awsConf) { + LOG.debug("Creating endpoint configuration for {}", endpoint); + if (endpoint == null || endpoint.isEmpty()) { + // the default endpoint...we should be using null at this point. + LOG.debug("Using default endpoint -no need to generate a configuration"); + return null; + } + + final URI epr = RuntimeHttpUtils.toUri(endpoint, awsConf); + LOG.debug("Endpoint URI = {}", epr); + + String region; + if (!ServiceUtils.isS3USStandardEndpoint(endpoint)) { + LOG.debug("Endpoint {} is not the default; parsing", epr); + region = AwsHostNameUtils.parseRegion( + epr.getHost(), + S3_SERVICE_NAME); + } else { + // US-east, set region == null. + LOG.debug("Endpoint {} is the standard one; declare region as null", epr); + region = null; + } + LOG.debug("Region for endpoint {}, URI {} is determined as {}", + endpoint, epr, region); + return new AwsClientBuilder.EndpointConfiguration(endpoint, region); + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/FailureInjectionPolicy.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/FailureInjectionPolicy.java index 8cd6036b0c00f..f6bfc9aa92802 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/FailureInjectionPolicy.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/FailureInjectionPolicy.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java index 34c043be9cb73..e54c21d853e6a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentAmazonS3Client.java @@ -53,7 +53,7 @@ import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -330,7 +330,9 @@ private boolean isDescendant(String parent, String child, boolean recursive) { } else { Path actualParentPath = new Path(child).getParent(); Path expectedParentPath = new Path(parent); - return actualParentPath.equals(expectedParentPath); + // children which are directory markers are excluded here + return actualParentPath.equals(expectedParentPath) + && !child.endsWith("/"); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java index 932c472f5bea2..c11581f1d5d78 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/InconsistentS3ClientFactory.java @@ -19,7 +19,6 @@ package org.apache.hadoop.fs.s3a; import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import org.apache.hadoop.classification.InterfaceAudience; @@ -30,22 +29,25 @@ * This client is for testing only; it is in the production * {@code hadoop-aws} module to enable integration tests to use this * just by editing the Hadoop configuration used to bring up the client. + * + * The factory uses the older constructor-based instantiation/configuration + * of the client, so does not wire up metrics, handlers etc. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class InconsistentS3ClientFactory extends DefaultS3ClientFactory { - /** - * Create the inconsistent client. - * Logs a warning that this is being done. - * @param credentials credentials to use - * @param awsConf AWS configuration - * @return an inconsistent client. - */ @Override - protected AmazonS3 newAmazonS3Client(AWSCredentialsProvider credentials, - ClientConfiguration awsConf) { + protected AmazonS3 buildAmazonS3Client( + final ClientConfiguration awsConf, + final S3ClientCreationParameters parameters) { LOG.warn("** FAILURE INJECTION ENABLED. Do not run in production! **"); - return new InconsistentAmazonS3Client(credentials, awsConf, getConf()); + InconsistentAmazonS3Client s3 + = new InconsistentAmazonS3Client( + parameters.getCredentialSet(), awsConf, getConf()); + configureAmazonS3Client(s3, + parameters.getEndpoint(), + parameters.isPathStyleAccess()); + return s3; } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java index bbb9faa000a08..19cd6c985b531 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java @@ -25,22 +25,24 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.SdkBaseException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.util.DurationInfo; +import org.apache.hadoop.util.functional.CallableRaisingIOE; /** * Class to provide lambda expression invocation of AWS operations. * * The core retry logic is in - * {@link #retryUntranslated(String, boolean, Retried, Operation)}; + * {@link #retryUntranslated(String, boolean, Retried, CallableRaisingIOE)}; * the other {@code retry() and retryUntranslated()} calls are wrappers. * - * The static {@link #once(String, String, Operation)} and + * The static {@link #once(String, String, CallableRaisingIOE)} and * {@link #once(String, String, VoidOperation)} calls take an operation and * return it with AWS exceptions translated to IOEs of some form. * @@ -56,11 +58,13 @@ * These callbacks can be used for reporting and incrementing statistics. 
* * The static {@link #quietly(String, String, VoidOperation)} and - * {@link #quietlyEval(String, String, Operation)} calls exist to take any - * operation and quietly catch and log at debug. The return value of - * {@link #quietlyEval(String, String, Operation)} is a java 8 optional, + * {@link #quietlyEval(String, String, CallableRaisingIOE)} calls exist to + * take any operation and quietly catch and log at debug. + * The return value of {@link #quietlyEval(String, String, CallableRaisingIOE)} + * is a java 8 optional, * which can then be used in java8-expressions. */ +@InterfaceAudience.Private public class Invoker { private static final Logger LOG = LoggerFactory.getLogger(Invoker.class); @@ -104,10 +108,11 @@ public Retried getRetryCallback() { * @throws IOException any IOE raised, or translated exception */ @Retries.OnceTranslated - public static T once(String action, String path, Operation operation) + public static T once(String action, String path, + CallableRaisingIOE operation) throws IOException { try (DurationInfo ignored = new DurationInfo(LOG, false, "%s", action)) { - return operation.execute(); + return operation.apply(); } catch (AmazonClientException e) { throw S3AUtils.translateException(action, path, e); } @@ -143,7 +148,7 @@ public static void ignoreIOExceptions( Logger log, String action, String path, - Operation operation) { + CallableRaisingIOE operation) { try { once(action, path, operation); } catch (IOException e) { @@ -280,7 +285,7 @@ public void maybeRetry( public T retry(String action, @Nullable String path, boolean idempotent, - Operation operation) + CallableRaisingIOE operation) throws IOException { return retry(action, path, idempotent, retryCallback, operation); @@ -288,7 +293,7 @@ public T retry(String action, /** * Execute a function with retry processing. - * Uses {@link #once(String, String, Operation)} as the inner + * Uses {@link #once(String, String, CallableRaisingIOE)} as the inner * invocation mechanism before retry logic is performed. * @param type of return value * @param action action to execute (used in error messages) @@ -306,7 +311,7 @@ public T retry( @Nullable String path, boolean idempotent, Retried retrying, - Operation operation) + CallableRaisingIOE operation) throws IOException { return retryUntranslated( toDescription(action, path), @@ -317,7 +322,7 @@ public T retry( /** * Execute a function with retry processing when doRetry=true, else just once. - * Uses {@link #once(String, String, Operation)} as the inner + * Uses {@link #once(String, String, CallableRaisingIOE)} as the inner * invocation mechanism before retry logic is performed. 
* @param type of return value * @param doRetry true if retries should be performed @@ -337,7 +342,7 @@ public T maybeRetry( @Nullable String path, boolean idempotent, Retried retrying, - Operation operation) + CallableRaisingIOE operation) throws IOException { if (doRetry) { return retryUntranslated( @@ -366,7 +371,7 @@ public T maybeRetry( public T retryUntranslated( String text, boolean idempotent, - Operation operation) throws IOException { + CallableRaisingIOE operation) throws IOException { return retryUntranslated(text, idempotent, retryCallback, operation); } @@ -391,7 +396,7 @@ public T retryUntranslated( String text, boolean idempotent, Retried retrying, - Operation operation) throws IOException { + CallableRaisingIOE operation) throws IOException { Preconditions.checkArgument(retrying != null, "null retrying argument"); int retryCount = 0; @@ -404,7 +409,7 @@ public T retryUntranslated( LOG.debug("retry #{}", retryCount); } // execute the operation, returning if successful - return operation.execute(); + return operation.apply(); } catch (IOException | SdkBaseException e) { caught = e; } @@ -490,7 +495,7 @@ public static void quietly(String action, */ public static Optional quietlyEval(String action, String path, - Operation operation) { + CallableRaisingIOE operation) { try { return Optional.of(once(action, path, operation)); } catch (Exception e) { @@ -510,15 +515,6 @@ private static String toDescription(String action, @Nullable String path) { (StringUtils.isNotEmpty(path) ? (" on " + path) : ""); } - /** - * Arbitrary operation throwing an IOException. - * @param return type - */ - @FunctionalInterface - public interface Operation { - T execute() throws IOException; - } - /** * Void operation which may raise an IOException. */ diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java index 9c2f67dd884ec..3cb3d5d832df3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java @@ -22,21 +22,35 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.impl.AbstractStoreOperation; +import org.apache.hadoop.fs.s3a.impl.ListingOperationCallbacks; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStoreListFilesIterator; +import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; +import org.apache.hadoop.fs.s3a.s3guard.S3Guard; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.util.functional.RemoteIterators; -import com.google.common.base.Preconditions; import org.slf4j.Logger; +import java.io.FileNotFoundException; import java.io.IOException; +import java.time.Instant; +import 
java.time.OffsetDateTime; +import java.time.ZoneOffset; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -44,27 +58,42 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.StringJoiner; +import static org.apache.hadoop.fs.impl.FutureIOSupport.awaitFuture; import static org.apache.hadoop.fs.s3a.Constants.S3N_FOLDER_SUFFIX; +import static org.apache.hadoop.fs.s3a.S3AUtils.ACCEPT_ALL; import static org.apache.hadoop.fs.s3a.S3AUtils.createFileStatus; +import static org.apache.hadoop.fs.s3a.S3AUtils.maybeAddTrailingSlash; import static org.apache.hadoop.fs.s3a.S3AUtils.objectRepresentsDirectory; import static org.apache.hadoop.fs.s3a.S3AUtils.stringify; import static org.apache.hadoop.fs.s3a.S3AUtils.translateException; +import static org.apache.hadoop.fs.s3a.auth.RoleModel.pathToKey; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; +import static org.apache.hadoop.util.functional.RemoteIterators.filteringRemoteIterator; +import static org.apache.hadoop.util.functional.RemoteIterators.remoteIteratorFromArray; +import static org.apache.hadoop.util.functional.RemoteIterators.remoteIteratorFromSingleton; /** * Place for the S3A listing classes; keeps all the small classes under control. */ @InterfaceAudience.Private -public class Listing { +public class Listing extends AbstractStoreOperation { - private final S3AFileSystem owner; private static final Logger LOG = S3AFileSystem.LOG; static final FileStatusAcceptor ACCEPT_ALL_BUT_S3N = new AcceptAllButS3nDirs(); - public Listing(S3AFileSystem owner) { - this.owner = owner; + private final ListingOperationCallbacks listingOperationCallbacks; + + public Listing(ListingOperationCallbacks listingOperationCallbacks, + StoreContext storeContext) { + super(storeContext); + this.listingOperationCallbacks = listingOperationCallbacks; } /** @@ -76,11 +105,27 @@ public Listing(S3AFileSystem owner) { * @param acceptor the file status acceptor * @return the file status iterator */ - ProvidedFileStatusIterator createProvidedFileStatusIterator( + RemoteIterator createProvidedFileStatusIterator( S3AFileStatus[] fileStatuses, PathFilter filter, FileStatusAcceptor acceptor) { - return new ProvidedFileStatusIterator(fileStatuses, filter, acceptor); + return filteringRemoteIterator( + remoteIteratorFromArray(fileStatuses), + status -> + filter.accept(status.getPath()) && acceptor.accept(status)); + } + + /** + * Create a FileStatus iterator against a provided list of file status. + * @param fileStatuses array of file status. + * @return the file status iterator. 
+ */ + @VisibleForTesting + public static RemoteIterator toProvidedFileStatusIterator( + S3AFileStatus[] fileStatuses) { + return filteringRemoteIterator( + remoteIteratorFromArray(fileStatuses), + Listing.ACCEPT_ALL_BUT_S3N::accept); } /** @@ -125,21 +170,38 @@ public FileStatusListingIterator createFileStatusListingIterator( Listing.FileStatusAcceptor acceptor, RemoteIterator providedStatus) throws IOException { return new FileStatusListingIterator( - new ObjectListingIterator(listPath, request), + createObjectListingIterator(listPath, request), filter, acceptor, providedStatus); } + /** + * Create an object listing iterator against a path, with a given + * list object request. + * @param listPath path of the listing + * @param request initial request to make + * @return the iterator + * @throws IOException IO Problems + */ + @Retries.RetryRaw + public ObjectListingIterator createObjectListingIterator( + final Path listPath, + final S3ListRequest request) throws IOException { + return new ObjectListingIterator(listPath, request); + } + /** * Create a located status iterator over a file status iterator. * @param statusIterator an iterator over the remote status entries * @return a new remote iterator */ @VisibleForTesting - public LocatedFileStatusIterator createLocatedFileStatusIterator( + public RemoteIterator createLocatedFileStatusIterator( RemoteIterator statusIterator) { - return new LocatedFileStatusIterator(statusIterator); + return RemoteIterators.mappingRemoteIterator( + statusIterator, + listingOperationCallbacks::toLocatedFileStatus); } /** @@ -151,9 +213,219 @@ public LocatedFileStatusIterator createLocatedFileStatusIterator( * @return a new remote iterator. */ @VisibleForTesting - TombstoneReconcilingIterator createTombstoneReconcilingIterator( - RemoteIterator iterator, Set tombstones) { - return new TombstoneReconcilingIterator(iterator, tombstones); + RemoteIterator createTombstoneReconcilingIterator( + RemoteIterator iterator, + @Nullable Set tombstones) { + if (tombstones == null || tombstones.isEmpty()) { + // no need to filter. + return iterator; + } else { + return filteringRemoteIterator( + iterator, + candidate -> !tombstones.contains(candidate.getPath())); + } + } + + /** + * Create a remote iterator from a single status entry. + * @param status status + * @return iterator. + */ + public RemoteIterator createSingleStatusIterator( + S3ALocatedFileStatus status) { + return remoteIteratorFromSingleton(status); + } + + /** + * List files under a path assuming the path to be a directory. + * @param path input path. + * @param recursive recursive listing? + * @param acceptor file status filter + * @param collectTombstones should tombstones be collected from S3Guard? + * @param forceNonAuthoritativeMS forces metadata store to act like non + * authoritative. This is useful when + * listFiles output is used by import tool. + * @return an iterator over listing. + * @throws IOException any exception. + */ + public RemoteIterator getListFilesAssumingDir( + Path path, + boolean recursive, Listing.FileStatusAcceptor acceptor, + boolean collectTombstones, + boolean forceNonAuthoritativeMS) throws IOException { + + String key = maybeAddTrailingSlash(pathToKey(path)); + String delimiter = recursive ? 
null : "/"; + if (recursive) { + LOG.debug("Recursive list of all entries under {}", key); + } else { + LOG.debug("Requesting all entries under {} with delimiter '{}'", + key, delimiter); + } + final RemoteIterator cachedFilesIterator; + final Set tombstones; + boolean allowAuthoritative = listingOperationCallbacks + .allowAuthoritative(path); + if (recursive) { + final PathMetadata pm = getStoreContext() + .getMetadataStore() + .get(path, true); + if (pm != null) { + if (pm.isDeleted()) { + OffsetDateTime deletedAt = OffsetDateTime + .ofInstant(Instant.ofEpochMilli( + pm.getFileStatus().getModificationTime()), + ZoneOffset.UTC); + throw new FileNotFoundException("Path " + path + " is recorded as " + + "deleted by S3Guard at " + deletedAt); + } + } + MetadataStoreListFilesIterator metadataStoreListFilesIterator = + new MetadataStoreListFilesIterator( + getStoreContext().getMetadataStore(), + pm, + allowAuthoritative); + tombstones = metadataStoreListFilesIterator.listTombstones(); + // if all of the below is true + // - authoritative access is allowed for this metadatastore + // for this directory, + // - all the directory listings are authoritative on the client + // - the caller does not force non-authoritative access + // return the listing without any further s3 access + if (!forceNonAuthoritativeMS && + allowAuthoritative && + metadataStoreListFilesIterator.isRecursivelyAuthoritative()) { + S3AFileStatus[] statuses = S3AUtils.iteratorToStatuses( + metadataStoreListFilesIterator, tombstones); + cachedFilesIterator = createProvidedFileStatusIterator( + statuses, ACCEPT_ALL, acceptor); + return createLocatedFileStatusIterator(cachedFilesIterator); + } + cachedFilesIterator = metadataStoreListFilesIterator; + } else { + DirListingMetadata meta = + S3Guard.listChildrenWithTtl( + getStoreContext().getMetadataStore(), + path, + listingOperationCallbacks.getUpdatedTtlTimeProvider(), + allowAuthoritative); + if (meta != null) { + tombstones = meta.listTombstones(); + } else { + tombstones = null; + } + cachedFilesIterator = createProvidedFileStatusIterator( + S3Guard.dirMetaToStatuses(meta), ACCEPT_ALL, acceptor); + if (allowAuthoritative && meta != null && meta.isAuthoritative()) { + // metadata listing is authoritative, so return it directly + return createLocatedFileStatusIterator(cachedFilesIterator); + } + } + return createTombstoneReconcilingIterator( + createLocatedFileStatusIterator( + createFileStatusListingIterator(path, + listingOperationCallbacks + .createListObjectsRequest(key, delimiter), + ACCEPT_ALL, + acceptor, + cachedFilesIterator)), + collectTombstones ? tombstones : null); + } + + /** + * Generate list located status for a directory. + * Also performing tombstone reconciliation for guarded directories. + * @param dir directory to check. + * @param filter a path filter. + * @return an iterator that traverses statuses of the given dir. + * @throws IOException in case of failure. + */ + public RemoteIterator getLocatedFileStatusIteratorForDir( + Path dir, PathFilter filter) throws IOException { + final String key = maybeAddTrailingSlash(pathToKey(dir)); + final Listing.FileStatusAcceptor acceptor = + new Listing.AcceptAllButSelfAndS3nDirs(dir); + boolean allowAuthoritative = listingOperationCallbacks + .allowAuthoritative(dir); + DirListingMetadata meta = + S3Guard.listChildrenWithTtl(getStoreContext().getMetadataStore(), + dir, + listingOperationCallbacks + .getUpdatedTtlTimeProvider(), + allowAuthoritative); + Set tombstones = meta != null + ? 
meta.listTombstones() + : null; + final RemoteIterator cachedFileStatusIterator = + createProvidedFileStatusIterator( + S3Guard.dirMetaToStatuses(meta), filter, acceptor); + return (allowAuthoritative && meta != null + && meta.isAuthoritative()) + ? createLocatedFileStatusIterator( + cachedFileStatusIterator) + : createTombstoneReconcilingIterator( + createLocatedFileStatusIterator( + createFileStatusListingIterator(dir, + listingOperationCallbacks + .createListObjectsRequest(key, "/"), + filter, + acceptor, + cachedFileStatusIterator)), + tombstones); + } + + /** + * Calculate list of file statuses assuming path + * to be a non-empty directory. + * @param path input path. + * @return Triple of file statuses, metaData, auth flag. + * @throws IOException Any IO problems. + */ + public Triple, DirListingMetadata, Boolean> + getFileStatusesAssumingNonEmptyDir(Path path) + throws IOException { + String key = pathToKey(path); + List result; + if (!key.isEmpty()) { + key = key + '/'; + } + + boolean allowAuthoritative = listingOperationCallbacks + .allowAuthoritative(path); + DirListingMetadata dirMeta = + S3Guard.listChildrenWithTtl( + getStoreContext().getMetadataStore(), + path, + listingOperationCallbacks.getUpdatedTtlTimeProvider(), + allowAuthoritative); + // In auth mode return directly with auth flag. + if (allowAuthoritative && dirMeta != null && dirMeta.isAuthoritative()) { + RemoteIterator mfsItr = createProvidedFileStatusIterator( + S3Guard.dirMetaToStatuses(dirMeta), + ACCEPT_ALL, + Listing.ACCEPT_ALL_BUT_S3N); + return Triple.of(mfsItr, + dirMeta, Boolean.TRUE); + } + + S3ListRequest request = createListObjectsRequest(key, "/"); + LOG.debug("listStatus: doing listObjects for directory {}", key); + + FileStatusListingIterator filesItr = createFileStatusListingIterator( + path, + request, + ACCEPT_ALL, + new Listing.AcceptAllButSelfAndS3nDirs(path)); + + // return the results obtained from s3. + return Triple.of( + filesItr, + dirMeta, + Boolean.FALSE); + } + + public S3ListRequest createListObjectsRequest(String key, String delimiter) { + return listingOperationCallbacks.createListObjectsRequest(key, delimiter); } /** @@ -188,105 +460,6 @@ interface FileStatusAcceptor { boolean accept(FileStatus status); } - /** - * A remote iterator which only iterates over a single `LocatedFileStatus` - * value. - * - * If the status value is null, the iterator declares that it has no data. - * This iterator is used to handle {@link S3AFileSystem#listStatus} calls - * where the path handed in refers to a file, not a directory: this is the - * iterator returned. - */ - static final class SingleStatusRemoteIterator - implements RemoteIterator { - - /** - * The status to return; set to null after the first iteration. - */ - private S3ALocatedFileStatus status; - - /** - * Constructor. - * @param status status value: may be null, in which case - * the iterator is empty. - */ - SingleStatusRemoteIterator(S3ALocatedFileStatus status) { - this.status = status; - } - - /** - * {@inheritDoc} - * @return true if there is a file status to return: this is always false - * for the second iteration, and may be false for the first. - * @throws IOException never - */ - @Override - public boolean hasNext() throws IOException { - return status != null; - } - - /** - * {@inheritDoc} - * @return the non-null status element passed in when the instance was - * constructed, if it ha not already been retrieved. 
- * @throws IOException never - * @throws NoSuchElementException if this is the second call, or it is - * the first call and a null {@link LocatedFileStatus} entry was passed - * to the constructor. - */ - @Override - public S3ALocatedFileStatus next() throws IOException { - if (hasNext()) { - S3ALocatedFileStatus s = this.status; - status = null; - return s; - } else { - throw new NoSuchElementException(); - } - } - } - - /** - * This wraps up a provided non-null list of file status as a remote iterator. - * - * It firstly filters the provided list and later {@link #next} call will get - * from the filtered list. This suffers from scalability issues if the - * provided list is too large. - * - * There is no remote data to fetch. - */ - static class ProvidedFileStatusIterator - implements RemoteIterator { - private final ArrayList filteredStatusList; - private int index = 0; - - ProvidedFileStatusIterator(S3AFileStatus[] fileStatuses, PathFilter filter, - FileStatusAcceptor acceptor) { - Preconditions.checkArgument(fileStatuses != null, "Null status list!"); - - filteredStatusList = new ArrayList<>(fileStatuses.length); - for (S3AFileStatus status : fileStatuses) { - if (filter.accept(status.getPath()) && acceptor.accept(status)) { - filteredStatusList.add(status); - } - } - filteredStatusList.trimToSize(); - } - - @Override - public boolean hasNext() throws IOException { - return index < filteredStatusList.size(); - } - - @Override - public S3AFileStatus next() throws IOException { - if (!hasNext()) { - throw new NoSuchElementException(); - } - return filteredStatusList.get(index++); - } - } - /** * Wraps up object listing into a remote iterator which will ask for more * listing data if needed. @@ -314,7 +487,7 @@ public S3AFileStatus next() throws IOException { * Thread safety: None. */ class FileStatusListingIterator - implements RemoteIterator { + implements RemoteIterator, IOStatisticsSource { /** Source of objects. 
*/ private final ObjectListingIterator source; @@ -465,14 +638,15 @@ private boolean buildNextStatusBatch(S3ListResult objects) { // objects for (S3ObjectSummary summary : objects.getObjectSummaries()) { String key = summary.getKey(); - Path keyPath = owner.keyToQualifiedPath(key); + Path keyPath = getStoreContext().getContextAccessors().keyToPath(key); if (LOG.isDebugEnabled()) { LOG.debug("{}: {}", keyPath, stringify(summary)); } // Skip over keys that are ourselves and old S3N _$folder$ files if (acceptor.accept(keyPath, summary) && filter.accept(keyPath)) { S3AFileStatus status = createFileStatus(keyPath, summary, - owner.getDefaultBlockSize(keyPath), owner.getUsername(), + listingOperationCallbacks.getDefaultBlockSize(keyPath), + getStoreContext().getUsername(), summary.getETag(), null); LOG.debug("Adding: {}", status); stats.add(status); @@ -485,10 +659,12 @@ private boolean buildNextStatusBatch(S3ListResult objects) { // prefixes: always directories for (String prefix : objects.getCommonPrefixes()) { - Path keyPath = owner.keyToQualifiedPath(prefix); + Path keyPath = getStoreContext() + .getContextAccessors() + .keyToPath(prefix); if (acceptor.accept(keyPath, prefix) && filter.accept(keyPath)) { S3AFileStatus status = new S3AFileStatus(Tristate.FALSE, keyPath, - owner.getUsername()); + getStoreContext().getUsername()); LOG.debug("Adding directory: {}", status); added++; stats.add(status); @@ -514,6 +690,23 @@ private boolean buildNextStatusBatch(S3ListResult objects) { public int getBatchSize() { return batchSize; } + + /** + * Return any IOStatistics provided by the underlying stream. + * @return IO stats from the inner stream. + */ + @Override + public IOStatistics getIOStatistics() { + return source.getIOStatistics(); + } + + @Override + public String toString() { + return new StringJoiner(", ", + FileStatusListingIterator.class.getSimpleName() + "[", "]") + .add(source.toString()) + .toString(); + } } /** @@ -536,7 +729,8 @@ public int getBatchSize() { * * Thread safety: none. */ - class ObjectListingIterator implements RemoteIterator { + class ObjectListingIterator implements RemoteIterator, + IOStatisticsSource { /** The path listed. */ private final Path listPath; @@ -561,6 +755,18 @@ class ObjectListingIterator implements RemoteIterator { */ private int maxKeys; + private final IOStatisticsStore iostats; + + /** + * Future to store current batch listing result. + */ + private CompletableFuture s3ListResultFuture; + + /** + * Result of previous batch. + */ + private S3ListResult objectsPrev; + /** * Constructor -calls `listObjects()` on the request to populate the * initial set of results/fail if there was a problem talking to the bucket. 
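The new s3ListResultFuture and objectsPrev fields documented above turn object listing into a double-buffered pipeline: while the caller processes page N, the request for page N+1 is already in flight. A minimal sketch of that pattern with hypothetical stand-in types (Page type parameter and the fetch functions below are not S3A classes):

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Function;
    import java.util.function.Predicate;
    import java.util.function.Supplier;

    /** Illustrative double-buffered paging; not the S3A implementation. */
    final class PrefetchingPager<P> {

      private final Function<P, CompletableFuture<P>> continueFetch;
      private final Predicate<P> hasMore;
      private CompletableFuture<P> pendingPage;
      private P previousPage;

      PrefetchingPager(Supplier<CompletableFuture<P>> firstFetch,
          Function<P, CompletableFuture<P>> continueFetch,
          Predicate<P> hasMore) {
        // the first request is issued eagerly, before any caller asks for data
        this.pendingPage = firstFetch.get();
        this.continueFetch = continueFetch;
        this.hasMore = hasMore;
      }

      boolean hasNext() {
        return previousPage == null || hasMore.test(previousPage);
      }

      P next() {
        // block only on the request issued earlier, then immediately start
        // fetching the following page so the round trip overlaps with the
        // caller's processing of the current one.
        P current = pendingPage.join();
        if (hasMore.test(current)) {
          pendingPage = continueFetch.apply(current);
        }
        previousPage = current;
        return current;
      }
    }

The real ObjectListingIterator additionally wraps each request in IOStatistics duration tracking and translates AmazonClientException into IOExceptions; both are omitted here.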
@@ -573,9 +779,15 @@ class ObjectListingIterator implements RemoteIterator { Path listPath, S3ListRequest request) throws IOException { this.listPath = listPath; - this.maxKeys = owner.getMaxKeys(); - this.objects = owner.listObjects(request); + this.maxKeys = listingOperationCallbacks.getMaxKeys(); this.request = request; + this.objectsPrev = null; + this.iostats = iostatisticsStore() + .withDurationTracking(OBJECT_LIST_REQUEST) + .withDurationTracking(OBJECT_CONTINUE_LIST_REQUEST) + .build(); + this.s3ListResultFuture = listingOperationCallbacks + .listObjectsAsync(request, iostats); } /** @@ -586,7 +798,8 @@ class ObjectListingIterator implements RemoteIterator { */ @Override public boolean hasNext() throws IOException { - return firstListing || objects.isTruncated(); + return firstListing || + (objectsPrev != null && objectsPrev.isTruncated()); } /** @@ -602,35 +815,57 @@ public boolean hasNext() throws IOException { @Retries.RetryTranslated public S3ListResult next() throws IOException { if (firstListing) { - // on the first listing, don't request more data. - // Instead just clear the firstListing flag so that it future calls - // will request new data. + // clear the firstListing flag for future calls. firstListing = false; + // Calculating the result of last async list call. + objects = awaitFuture(s3ListResultFuture); + fetchNextBatchAsyncIfPresent(); } else { try { - if (!objects.isTruncated()) { + if (objectsPrev!= null && !objectsPrev.isTruncated()) { // nothing more to request: fail. throw new NoSuchElementException("No more results in listing of " - + listPath); + + listPath); } - // need to request a new set of objects. - LOG.debug("[{}], Requesting next {} objects under {}", - listingCount, maxKeys, listPath); - objects = owner.continueListObjects(request, objects); + // Calculating the result of last async list call. + objects = awaitFuture(s3ListResultFuture); + // Requesting next batch of results. + fetchNextBatchAsyncIfPresent(); listingCount++; LOG.debug("New listing status: {}", this); } catch (AmazonClientException e) { throw translateException("listObjects()", listPath, e); } } - return objects; + // Storing the current result to be used by hasNext() call. + objectsPrev = objects; + return objectsPrev; + } + + /** + * If there are more listings present, call for next batch async. + * @throws IOException + */ + private void fetchNextBatchAsyncIfPresent() throws IOException { + if (objects.isTruncated()) { + LOG.debug("[{}], Requesting next {} objects under {}", + listingCount, maxKeys, listPath); + s3ListResultFuture = listingOperationCallbacks + .continueListObjectsAsync(request, objects, iostats); + } } @Override public String toString() { return "Object listing iterator against " + listPath + "; listing count "+ listingCount - + "; isTruncated=" + objects.isTruncated(); + + "; isTruncated=" + objects.isTruncated() + + "; " + iostats; + } + + @Override + public IOStatistics getIOStatistics() { + return iostats; } /** @@ -693,88 +928,6 @@ public boolean accept(FileStatus status) { } } - /** - * Take a remote iterator over a set of {@link FileStatus} instances and - * return a remote iterator of {@link LocatedFileStatus} instances. - */ - class LocatedFileStatusIterator - implements RemoteIterator { - private final RemoteIterator statusIterator; - - /** - * Constructor. 
- * @param statusIterator an iterator over the remote status entries - */ - LocatedFileStatusIterator(RemoteIterator statusIterator) { - this.statusIterator = statusIterator; - } - - @Override - public boolean hasNext() throws IOException { - return statusIterator.hasNext(); - } - - @Override - public S3ALocatedFileStatus next() throws IOException { - return owner.toLocatedFileStatus(statusIterator.next()); - } - } - - /** - * Wraps another iterator and filters out files that appear in the provided - * set of tombstones. Will read ahead in the iterator when necessary to - * ensure that emptiness is detected early enough if only deleted objects - * remain in the source iterator. - */ - static class TombstoneReconcilingIterator implements - RemoteIterator { - private S3ALocatedFileStatus next = null; - private final RemoteIterator iterator; - private final Set tombstones; - - /** - * @param iterator Source iterator to filter - * @param tombstones set of tombstone markers to filter out of results - */ - TombstoneReconcilingIterator(RemoteIterator - iterator, Set tombstones) { - this.iterator = iterator; - if (tombstones != null) { - this.tombstones = tombstones; - } else { - this.tombstones = Collections.emptySet(); - } - } - - private boolean fetch() throws IOException { - while (next == null && iterator.hasNext()) { - S3ALocatedFileStatus candidate = iterator.next(); - if (!tombstones.contains(candidate.getPath())) { - next = candidate; - return true; - } - } - return false; - } - - public boolean hasNext() throws IOException { - if (next != null) { - return true; - } - return fetch(); - } - - public S3ALocatedFileStatus next() throws IOException { - if (hasNext()) { - S3ALocatedFileStatus result = next; - next = null; - fetch(); - return result; - } - throw new NoSuchElementException(); - } - } - /** * Accept all entries except those which map to S3N pseudo directory markers. 
*/ @@ -843,4 +996,9 @@ public boolean accept(FileStatus status) { } } + public static RemoteIterator toLocatedFileStatusIterator( + RemoteIterator iterator) { + return (RemoteIterator < LocatedFileStatus >) iterator; + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java index 66cac99de7baa..4f06981bc2d45 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java @@ -20,15 +20,17 @@ import java.io.IOException; import java.io.OutputStream; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.StringJoiner; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.amazonaws.AmazonClientException; import com.amazonaws.event.ProgressEvent; import com.amazonaws.event.ProgressEventType; import com.amazonaws.event.ProgressListener; @@ -36,23 +38,35 @@ import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.UploadPartRequest; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Abortable; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.StreamCapabilities; +import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.s3a.commit.CommitConstants; import org.apache.hadoop.fs.s3a.commit.PutTracker; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsLogging; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.util.Progressable; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.*; +import static org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext.EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.emptyStatistics; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfInvocation; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; /** @@ -63,15 +77,20 @@ * is instead done as a single PUT operation. 
* * Unstable: statistics and error handling might evolve. + * + * Syncable is declared as supported so the calls can be + * explicitly rejected. */ @InterfaceAudience.Private @InterfaceStability.Unstable class S3ABlockOutputStream extends OutputStream implements - StreamCapabilities { + StreamCapabilities, IOStatisticsSource, Syncable, Abortable { private static final Logger LOG = LoggerFactory.getLogger(S3ABlockOutputStream.class); + private static final String E_NOT_SYNCABLE = "S3A streams are not Syncable"; + /** Owner FileSystem. */ private final S3AFileSystem fs; @@ -81,6 +100,9 @@ class S3ABlockOutputStream extends OutputStream implements /** Size of all blocks. */ private final int blockSize; + /** IO Statistics. */ + private final IOStatistics iostatistics; + /** Total bytes for uploads submitted so far. */ private long bytesSubmitted; @@ -109,7 +131,7 @@ class S3ABlockOutputStream extends OutputStream implements private long blockCount = 0; /** Statistics to build up. */ - private final S3AInstrumentation.OutputStreamStatistics statistics; + private final BlockOutputStreamStatistics statistics; /** * Write operation helper; encapsulation of the filesystem operations. @@ -146,7 +168,7 @@ class S3ABlockOutputStream extends OutputStream implements Progressable progress, long blockSize, S3ADataBlocks.BlockFactory blockFactory, - S3AInstrumentation.OutputStreamStatistics statistics, + BlockOutputStreamStatistics statistics, WriteOperationHelper writeOperationHelper, PutTracker putTracker) throws IOException { @@ -154,7 +176,13 @@ class S3ABlockOutputStream extends OutputStream implements this.key = key; this.blockFactory = blockFactory; this.blockSize = (int) blockSize; - this.statistics = statistics; + this.statistics = statistics != null + ? statistics + : EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS; + // test instantiations may not provide statistics; + this.iostatistics = statistics != null + ? statistics.getIOStatistics() + : emptyStatistics(); this.writeOperationHelper = writeOperationHelper; this.putTracker = putTracker; Preconditions.checkArgument(blockSize >= Constants.MULTIPART_MIN_SIZE, @@ -282,6 +310,7 @@ public synchronized void write(byte[] source, int offset, int len) if (len == 0) { return; } + statistics.writeBytes(len); S3ADataBlocks.DataBlock block = createBlockIfNeeded(); int written = block.write(source, offset, len); int remainingCapacity = block.remainingCapacity(); @@ -304,8 +333,8 @@ public synchronized void write(byte[] source, int offset, int len) /** * Start an asynchronous upload of the current block. - * @throws IOException Problems opening the destination for upload - * or initializing the upload. + * @throws IOException Problems opening the destination for upload, + * initializing the upload, or if a previous operation has failed. */ private synchronized void uploadCurrentBlock() throws IOException { Preconditions.checkState(hasActiveBlock(), "No active block"); @@ -382,7 +411,8 @@ public void close() throws IOException { // then complete the operation if (putTracker.aboutToComplete(multiPartUpload.getUploadId(), partETags, - bytes)) { + bytes, + iostatistics)) { multiPartUpload.complete(partETags); } else { LOG.info("File {} will be visible when the job is committed", key); @@ -394,18 +424,113 @@ public void close() throws IOException { } LOG.debug("Upload complete to {} by {}", key, writeOperationHelper); } catch (IOException ioe) { + // the operation failed. 
+ // if this happened during a multipart upload, abort the + // operation, so as to not leave (billable) data + // pending on the bucket + maybeAbortMultipart(); writeOperationHelper.writeFailed(ioe); throw ioe; } finally { - cleanupWithLogger(LOG, block, blockFactory); - LOG.debug("Statistics: {}", statistics); - cleanupWithLogger(LOG, statistics); - clearActiveBlock(); + cleanupOnClose(); } // Note end of write. This does not change the state of the remote FS. writeOperationHelper.writeSuccessful(bytes); } + /** + * Final operations in close/abort of stream. + * Shuts down block factory, closes any active block, + * and pushes out statistics. + */ + private synchronized void cleanupOnClose() { + cleanupWithLogger(LOG, getActiveBlock(), blockFactory); + LOG.debug("Statistics: {}", statistics); + cleanupWithLogger(LOG, statistics); + clearActiveBlock(); + } + + /** + * Best effort abort of the multipart upload; sets + * the field to null afterwards. + * @return any exception caught during the operation. + */ + private synchronized IOException maybeAbortMultipart() { + if (multiPartUpload != null) { + final IOException ioe = multiPartUpload.abort(); + multiPartUpload = null; + return ioe; + } else { + return null; + } + } + + /** + * Abort any active uploads, enter closed state. + * @return the outcome + */ + @Override + public AbortableResult abort() { + if (closed.getAndSet(true)) { + // already closed + LOG.debug("Ignoring abort() as stream is already closed"); + return new AbortableResultImpl(true, null); + } + try (DurationTracker d = + statistics.trackDuration(INVOCATION_ABORT.getSymbol())) { + return new AbortableResultImpl(false, maybeAbortMultipart()); + } finally { + cleanupOnClose(); + } + } + + /** + * Abortable result. + */ + private static final class AbortableResultImpl implements AbortableResult { + + /** + * Had the stream already been closed/aborted? + */ + private final boolean alreadyClosed; + + /** + * Was any exception raised during non-essential + * cleanup actions (i.e. MPU abort)? + */ + private final IOException anyCleanupException; + + /** + * Constructor. + * @param alreadyClosed Had the stream already been closed/aborted? + * @param anyCleanupException Was any exception raised during cleanup? + */ + private AbortableResultImpl(final boolean alreadyClosed, + final IOException anyCleanupException) { + this.alreadyClosed = alreadyClosed; + this.anyCleanupException = anyCleanupException; + } + + @Override + public boolean alreadyClosed() { + return alreadyClosed; + } + + @Override + public IOException anyCleanupException() { + return anyCleanupException; + } + + @Override + public String toString() { + return new StringJoiner(", ", + AbortableResultImpl.class.getSimpleName() + "[", "]") + .add("alreadyClosed=" + alreadyClosed) + .add("anyCleanupException=" + anyCleanupException) + .toString(); + } + } + /** * Upload the current block as a single PUT request; if the buffer * is empty a 0-byte PUT will be invoked, as it is needed to create an @@ -424,11 +549,10 @@ private int putObject() throws IOException { final PutObjectRequest putObjectRequest = uploadData.hasFile() ? 
writeOperationHelper.createPutObjectRequest(key, uploadData.getFile()) : writeOperationHelper.createPutObjectRequest(key, - uploadData.getUploadStream(), size); - long transferQueueTime = now(); + uploadData.getUploadStream(), size, null); BlockUploadProgress callback = new BlockUploadProgress( - block, progressListener, transferQueueTime); + block, progressListener, now()); putObjectRequest.setGeneralProgressListener(callback); statistics.blockUploadQueued(size); ListenableFuture putObjectResult = @@ -466,6 +590,8 @@ public String toString() { if (block != null) { sb.append(", activeBlock=").append(block); } + sb.append(" Statistics=") + .append(IOStatisticsLogging.ioStatisticsSourceToString(this)); sb.append('}'); return sb.toString(); } @@ -478,15 +604,15 @@ private void incrementWriteOperations() { * Current time in milliseconds. * @return time */ - private long now() { - return System.currentTimeMillis(); + private Instant now() { + return Instant.now(); } /** * Get the statistics for this stream. * @return stream statistics */ - S3AInstrumentation.OutputStreamStatistics getStatistics() { + BlockOutputStreamStatistics getStatistics() { return statistics; } @@ -513,11 +639,34 @@ public boolean hasCapability(String capability) { case StreamCapabilities.HSYNC: return false; + // yes, we do statistics. + case StreamCapabilities.IOSTATISTICS: + return true; + + // S3A supports abort. + case StreamCapabilities.ABORTABLE_STREAM: + return true; + default: return false; } } + @Override + public void hflush() throws IOException { + throw new UnsupportedOperationException(E_NOT_SYNCABLE); + } + + @Override + public void hsync() throws IOException { + throw new UnsupportedOperationException(E_NOT_SYNCABLE); + } + + @Override + public IOStatistics getIOStatistics() { + return iostatistics; + } + /** * Multiple partition upload. */ @@ -528,6 +677,13 @@ private class MultiPartUpload { private int partsUploaded; private long bytesSubmitted; + /** + * Any IOException raised during block upload. + * if non-null, then close() MUST NOT complete + * the file upload. + */ + private IOException blockUploadFailure; + MultiPartUpload(String key) throws IOException { this.uploadId = writeOperationHelper.initiateMultiPartUpload(key); this.partETagsFutures = new ArrayList<>(2); @@ -568,35 +724,63 @@ public long getBytesSubmitted() { return bytesSubmitted; } + /** + * A block upload has failed. + * Recorded it if there has been no previous failure. + * @param e error + */ + public void noteUploadFailure(final IOException e) { + if (blockUploadFailure == null) { + blockUploadFailure = e; + } + } + + /** + * If there is a block upload failure -throw it. + * @throws IOException if one has already been caught. + */ + public void maybeRethrowUploadFailure() throws IOException { + if (blockUploadFailure != null) { + throw blockUploadFailure; + } + } + /** * Upload a block of data. 
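// A hedged usage sketch of the new capabilities: a caller can probe the stream
// before relying on abort(). This assumes FSDataOutputStream forwards abort() to
// a wrapped stream implementing Abortable, as S3ABlockOutputStream now does; the
// capability name is the one handled in hasCapability() above.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

final class AbortOnFailureSketch {
  static void writeOrAbort(FileSystem fs, Path path, byte[] data)
      throws IOException {
    FSDataOutputStream out = fs.create(path, true);
    try {
      out.write(data);
      out.close();                                   // completes the upload
    } catch (IOException e) {
      if (out.hasCapability(StreamCapabilities.ABORTABLE_STREAM)) {
        out.abort();                                 // drop buffered data and pending parts
      }
      throw e;
    }
  }
}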
* This will take the block * @param block block to upload * @throws IOException upload failure + * @throws PathIOException if too many blocks were written */ private void uploadBlockAsync(final S3ADataBlocks.DataBlock block) throws IOException { LOG.debug("Queueing upload of {} for upload {}", block, uploadId); Preconditions.checkNotNull(uploadId, "Null uploadId"); + maybeRethrowUploadFailure(); partsSubmitted++; final int size = block.dataSize(); bytesSubmitted += size; - final S3ADataBlocks.BlockUploadData uploadData = block.startUpload(); final int currentPartNumber = partETagsFutures.size() + 1; - final UploadPartRequest request = - writeOperationHelper.newUploadPartRequest( - key, - uploadId, - currentPartNumber, - size, - uploadData.getUploadStream(), - uploadData.getFile(), - 0L); - - long transferQueueTime = now(); + final UploadPartRequest request; + final S3ADataBlocks.BlockUploadData uploadData; + try { + uploadData = block.startUpload(); + request = writeOperationHelper.newUploadPartRequest( + key, + uploadId, + currentPartNumber, + size, + uploadData.getUploadStream(), + uploadData.getFile(), + 0L); + } catch (IOException e) { + // failure to start the upload. + noteUploadFailure(e); + throw e; + } BlockUploadProgress callback = new BlockUploadProgress( - block, progressListener, transferQueueTime); + block, progressListener, now()); request.setGeneralProgressListener(callback); statistics.blockUploadQueued(block.dataSize()); ListenableFuture partETagFuture = @@ -613,6 +797,10 @@ private void uploadBlockAsync(final S3ADataBlocks.DataBlock block) LOG.debug("Stream statistics of {}", statistics); partsUploaded++; return partETag; + } catch (IOException e) { + // save immediately. + noteUploadFailure(e); + throw e; } finally { // close the stream and block cleanupWithLogger(LOG, uploadData, block); @@ -638,14 +826,20 @@ private List waitForAllPartUploads() throws IOException { //there is no way of recovering so abort //cancel all partUploads LOG.debug("While waiting for upload completion", ee); - LOG.debug("Cancelling futures"); - for (ListenableFuture future : partETagsFutures) { - future.cancel(true); - } //abort multipartupload this.abort(); throw extractException("Multi-part upload with id '" + uploadId - + "' to " + key, key, ee); + + "' to " + key, key, ee); + } + } + + /** + * Cancel all active uploads. + */ + private void cancelAllActiveFutures() { + LOG.debug("Cancelling futures"); + for (ListenableFuture future : partETagsFutures) { + future.cancel(true); } } @@ -658,37 +852,46 @@ private List waitForAllPartUploads() throws IOException { */ private void complete(List partETags) throws IOException { + maybeRethrowUploadFailure(); AtomicInteger errorCount = new AtomicInteger(0); try { - writeOperationHelper.completeMPUwithRetries(key, - uploadId, - partETags, - bytesSubmitted, - errorCount); + trackDurationOfInvocation(statistics, + MULTIPART_UPLOAD_COMPLETED.getSymbol(), () -> { + writeOperationHelper.completeMPUwithRetries(key, + uploadId, + partETags, + bytesSubmitted, + errorCount); + }); } finally { statistics.exceptionInMultipartComplete(errorCount.get()); } } /** - * Abort a multi-part upload. Retries are attempted on failures. + * Abort a multi-part upload. Retries are not attempted on failures. * IOExceptions are caught; this is expected to be run as a cleanup process. + * @return any caught exception. 
*/ - public void abort() { - int retryCount = 0; - AmazonClientException lastException; - fs.incrementStatistic(OBJECT_MULTIPART_UPLOAD_ABORTED); + private IOException abort() { + LOG.debug("Aborting upload"); try { - writeOperationHelper.abortMultipartUpload(key, uploadId, - (text, e, r, i) -> statistics.exceptionInMultipartAbort()); + trackDurationOfInvocation(statistics, + OBJECT_MULTIPART_UPLOAD_ABORTED.getSymbol(), () -> { + cancelAllActiveFutures(); + writeOperationHelper.abortMultipartUpload(key, uploadId, + false, null); + }); + return null; } catch (IOException e) { // this point is only reached if the operation failed more than // the allowed retry count LOG.warn("Unable to abort multipart upload," + " you may need to purge uploaded parts", e); + statistics.exceptionInMultipartAbort(); + return e; } } - } /** @@ -700,8 +903,8 @@ public void abort() { private final class BlockUploadProgress implements ProgressListener { private final S3ADataBlocks.DataBlock block; private final ProgressListener nextListener; - private final long transferQueueTime; - private long transferStartTime; + private final Instant transferQueueTime; + private Instant transferStartTime; /** * Track the progress of a single block upload. @@ -712,7 +915,7 @@ private final class BlockUploadProgress implements ProgressListener { */ private BlockUploadProgress(S3ADataBlocks.DataBlock block, ProgressListener nextListener, - long transferQueueTime) { + Instant transferQueueTime) { this.block = block; this.transferQueueTime = transferQueueTime; this.nextListener = nextListener; @@ -733,17 +936,22 @@ public void progressChanged(ProgressEvent progressEvent) { case TRANSFER_PART_STARTED_EVENT: transferStartTime = now(); - statistics.blockUploadStarted(transferStartTime - transferQueueTime, + statistics.blockUploadStarted( + Duration.between(transferQueueTime, transferStartTime), size); incrementWriteOperations(); break; case TRANSFER_PART_COMPLETED_EVENT: - statistics.blockUploadCompleted(now() - transferStartTime, size); + statistics.blockUploadCompleted( + Duration.between(transferStartTime, now()), + size); break; case TRANSFER_PART_FAILED_EVENT: - statistics.blockUploadFailed(now() - transferStartTime, size); + statistics.blockUploadFailed( + Duration.between(transferStartTime, now()), + size); LOG.warn("Transfer failure of block {}", block); break; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java index 156defb7ca031..250317706900c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java @@ -32,11 +32,12 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSExceptionMessages; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.util.DirectBufferPool; import static org.apache.hadoop.fs.s3a.S3ADataBlocks.DataBlock.DestState.*; @@ -180,7 +181,7 @@ protected BlockFactory(S3AFileSystem owner) { * @return a new block. 
*/ abstract DataBlock create(long index, int limit, - S3AInstrumentation.OutputStreamStatistics statistics) + BlockOutputStreamStatistics statistics) throws IOException; /** @@ -210,10 +211,10 @@ enum DestState {Writing, Upload, Closed} private volatile DestState state = Writing; protected final long index; - protected final S3AInstrumentation.OutputStreamStatistics statistics; + private final BlockOutputStreamStatistics statistics; protected DataBlock(long index, - S3AInstrumentation.OutputStreamStatistics statistics) { + BlockOutputStreamStatistics statistics) { this.index = index; this.statistics = statistics; } @@ -372,6 +373,10 @@ protected void blockReleased() { statistics.blockReleased(); } } + + protected BlockOutputStreamStatistics getStatistics() { + return statistics; + } } // ==================================================================== @@ -387,7 +392,7 @@ static class ArrayBlockFactory extends BlockFactory { @Override DataBlock create(long index, int limit, - S3AInstrumentation.OutputStreamStatistics statistics) + BlockOutputStreamStatistics statistics) throws IOException { return new ByteArrayBlock(0, limit, statistics); } @@ -432,7 +437,7 @@ static class ByteArrayBlock extends DataBlock { ByteArrayBlock(long index, int limit, - S3AInstrumentation.OutputStreamStatistics statistics) { + BlockOutputStreamStatistics statistics) { super(index, statistics); this.limit = limit; buffer = new S3AByteArrayOutputStream(limit); @@ -510,7 +515,7 @@ static class ByteBufferBlockFactory extends BlockFactory { @Override ByteBufferBlock create(long index, int limit, - S3AInstrumentation.OutputStreamStatistics statistics) + BlockOutputStreamStatistics statistics) throws IOException { return new ByteBufferBlock(index, limit, statistics); } @@ -560,7 +565,7 @@ class ByteBufferBlock extends DataBlock { */ ByteBufferBlock(long index, int bufferSize, - S3AInstrumentation.OutputStreamStatistics statistics) { + BlockOutputStreamStatistics statistics) { super(index, statistics); this.bufferSize = bufferSize; blockBuffer = requestBuffer(bufferSize); @@ -805,7 +810,7 @@ static class DiskBlockFactory extends BlockFactory { @Override DataBlock create(long index, int limit, - S3AInstrumentation.OutputStreamStatistics statistics) + BlockOutputStreamStatistics statistics) throws IOException { File destFile = getOwner() .createTmpFileForWrite(String.format("s3ablock-%04d-", index), @@ -829,7 +834,7 @@ static class DiskBlock extends DataBlock { DiskBlock(File bufferFile, int limit, long index, - S3AInstrumentation.OutputStreamStatistics statistics) + BlockOutputStreamStatistics statistics) throws FileNotFoundException { super(index, statistics); this.limit = limit; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 9bd33a48df1b7..ca9bdf2bf5362 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -35,12 +35,14 @@ import java.util.Collections; import java.util.Date; import java.util.EnumSet; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.Objects; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import 
java.util.concurrent.TimeUnit; @@ -67,7 +69,6 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams; import com.amazonaws.services.s3.model.SSECustomerKey; import com.amazonaws.services.s3.model.UploadPartRequest; @@ -79,9 +80,8 @@ import com.amazonaws.services.s3.transfer.model.CopyResult; import com.amazonaws.services.s3.transfer.model.UploadResult; import com.amazonaws.event.ProgressListener; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,16 +104,29 @@ import org.apache.hadoop.fs.s3a.impl.ContextAccessors; import org.apache.hadoop.fs.s3a.impl.CopyOutcome; import org.apache.hadoop.fs.s3a.impl.DeleteOperation; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl; +import org.apache.hadoop.fs.s3a.impl.HeaderProcessing; import org.apache.hadoop.fs.s3a.impl.InternalConstants; +import org.apache.hadoop.fs.s3a.impl.ListingOperationCallbacks; import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport; import org.apache.hadoop.fs.s3a.impl.OperationCallbacks; import org.apache.hadoop.fs.s3a.impl.RenameOperation; +import org.apache.hadoop.fs.s3a.impl.S3AMultipartUploaderBuilder; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.select.InternalSelectConstants; +import org.apache.hadoop.fs.s3a.tools.MarkerToolOperations; +import org.apache.hadoop.fs.s3a.tools.MarkerToolOperationsImpl; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.LambdaUtils; @@ -144,11 +157,14 @@ import org.apache.hadoop.fs.s3a.select.SelectBinding; import org.apache.hadoop.fs.s3a.select.SelectConstants; import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; -import org.apache.hadoop.fs.s3a.s3guard.MetadataStoreListFilesIterator; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; import org.apache.hadoop.fs.s3a.s3guard.S3Guard; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; +import org.apache.hadoop.fs.s3a.statistics.impl.BondedS3AStatisticsContext; import org.apache.hadoop.fs.s3native.S3xLoginHelper; import org.apache.hadoop.io.retry.RetryPolicies; import 
org.apache.hadoop.fs.store.EtagChecksum; @@ -160,10 +176,12 @@ import org.apache.hadoop.util.SemaphoredDelegatingExecutor; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.impl.AbstractFSBuilderImpl.rejectUnknownMandatoryKeys; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.Invoker.*; +import static org.apache.hadoop.fs.s3a.Listing.toLocatedFileStatusIterator; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Statistic.*; import static org.apache.commons.lang3.StringUtils.isNotEmpty; @@ -171,12 +189,23 @@ import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3Operations; import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.TokenIssuingPolicy.NoTokensAvailable; import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.hasDelegationTokenBinding; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.FS_S3A_COMMITTER_STAGING_ABORT_PENDING_UPLOADS; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletionIgnoringExceptions; +import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isObjectNotFound; import static org.apache.hadoop.fs.s3a.impl.ErrorTranslation.isUnknownBucket; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_404; import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; +import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.logDnsLookup; +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.dirMetaToStatuses; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.pairedTrackerFactory; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfInvocation; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfOperation; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; +import static org.apache.hadoop.util.functional.RemoteIterators.typeCastingRemoteIterator; /** * The core S3A Filesystem implementation. @@ -194,7 +223,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class S3AFileSystem extends FileSystem implements StreamCapabilities, - AWSPolicyProvider, DelegationTokenProvider { + AWSPolicyProvider, DelegationTokenProvider, IOStatisticsSource { /** * Default blocksize as used in blocksize and FS status queries. */ @@ -230,7 +259,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private long partSize; private boolean enableMultiObjectsDelete; private TransferManager transfers; - private ListeningExecutorService boundedThreadPool; + private ExecutorService boundedThreadPool; private ThreadPoolExecutor unboundedThreadPool; private int executorCapacity; private long multiPartThreshold; @@ -246,9 +275,13 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, * is no encryption. */ private EncryptionSecrets encryptionSecrets = new EncryptionSecrets(); + /** The core instrumentation. 
*/ private S3AInstrumentation instrumentation; - private final S3AStorageStatistics storageStatistics = - createStorageStatistics(); + /** Accessors to statistics for this FS. */ + private S3AStatisticsContext statisticsContext; + /** Storage Statistics Bonded to the instrumentation. */ + private S3AStorageStatistics storageStatistics; + private long readAhead; private S3AInputPolicy inputPolicy; private ChangeDetectionPolicy changeDetectionPolicy; @@ -288,12 +321,30 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, private final S3AFileSystem.OperationCallbacksImpl operationCallbacks = new OperationCallbacksImpl(); + private final ListingOperationCallbacks listingOperationCallbacks = + new ListingOperationCallbacksImpl(); + /** + * Directory policy. + */ + private DirectoryPolicy directoryPolicy; + + /** + * Header processing for XAttr. + */ + private HeaderProcessing headerProcessing; + + /** + * Context accessors for re-use. + */ + private final ContextAccessors contextAccessors = new ContextAccessorsImpl(); + /** Add any deprecated keys. */ @SuppressWarnings("deprecation") private static void addDeprecatedKeys() { - // this is retained as a placeholder for when new deprecated keys - // need to be added. Configuration.DeprecationDelta[] deltas = { + new Configuration.DeprecationDelta( + FS_S3A_COMMITTER_STAGING_ABORT_PENDING_UPLOADS, + FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS) }; if (deltas.length > 0) { @@ -321,6 +372,11 @@ public void initialize(URI name, Configuration originalConf) LOG.debug("Initializing S3AFileSystem for {}", bucket); // clone the configuration into one with propagated bucket options Configuration conf = propagateBucketOptions(originalConf, bucket); + // fix up the classloader of the configuration to be whatever + // classloader loaded this filesystem. + // See: HADOOP-17372 + conf.setClassLoader(this.getClass().getClassLoader()); + // patch the Hadoop security providers patchSecurityCredentialProviders(conf); // look for delegation token support early. @@ -342,6 +398,7 @@ public void initialize(URI name, Configuration originalConf) invoker = new Invoker(new S3ARetryPolicy(getConf()), onRetry); instrumentation = new S3AInstrumentation(uri); + initializeStatisticsBinding(); // Username is the current user at the time the FS was instantiated. owner = UserGroupInformation.getCurrentUser(); @@ -351,13 +408,13 @@ public void initialize(URI name, Configuration originalConf) s3guardInvoker = new Invoker(new S3GuardExistsRetryPolicy(getConf()), onRetry); - writeHelper = new WriteOperationHelper(this, getConf()); + writeHelper = new WriteOperationHelper(this, getConf(), + statisticsContext); failOnMetadataWriteError = conf.getBoolean(FAIL_ON_METADATA_WRITE_ERROR, FAIL_ON_METADATA_WRITE_ERROR_DEFAULT); maxKeys = intOption(conf, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS, 1); - listing = new Listing(this); partSize = getMultipartSizeProperty(conf, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE); multiPartThreshold = getMultipartSizeProperty(conf, @@ -407,6 +464,8 @@ public void initialize(URI name, Configuration originalConf) magicCommitterEnabled ? 
"is" : "is not"); committerIntegration = new MagicCommitIntegration( this, magicCommitterEnabled); + // header processing for rename and magic committer + headerProcessing = new HeaderProcessing(createStoreContext()); // instantiate S3 Select support selectBinding = new SelectBinding(writeHelper); @@ -445,11 +504,16 @@ public void initialize(URI name, Configuration originalConf) DEFAULT_S3GUARD_DISABLED_WARN_LEVEL); S3Guard.logS3GuardDisabled(LOG, warnLevel, bucket); } + // directory policy, which may look at authoritative paths + directoryPolicy = DirectoryPolicyImpl.getDirectoryPolicy(conf, + this::allowAuthoritative); + LOG.debug("Directory marker retention policy is {}", directoryPolicy); initMultipartUploads(conf); pageSize = intOption(getConf(), BULK_DELETE_PAGE_SIZE, BULK_DELETE_PAGE_SIZE_DEFAULT, 0); + listing = new Listing(listingOperationCallbacks, createStoreContext()); } catch (AmazonClientException e) { // amazon client exception: stop all services then throw the translation stopAllServices(); @@ -469,6 +533,8 @@ public void initialize(URI name, Configuration originalConf) * S3AFileSystem initialization. When set to 1 or 2, bucket existence check * will be performed which is potentially slow. * If 3 or higher: warn and use the v2 check. + * Also logging DNS address of the s3 endpoint if the bucket probe value is + * greater than 0 else skipping it for increased performance. * @throws UnknownStoreException the bucket is absent * @throws IOException any other problem talking to S3 */ @@ -483,9 +549,11 @@ private void doBucketProbing() throws IOException { LOG.debug("skipping check for bucket existence"); break; case 1: + logDnsLookup(getConf()); verifyBucketExists(); break; case 2: + logDnsLookup(getConf()); verifyBucketExistsV2(); break; default: @@ -497,6 +565,33 @@ private void doBucketProbing() throws IOException { } } + /** + * Initialize the statistics binding. + * This is done by creating an {@code IntegratedS3AStatisticsContext} + * with callbacks to get the FS's instrumentation and FileSystem.statistics + * field; the latter may change after {@link #initialize(URI, Configuration)}, + * so needs to be dynamically adapted. + * Protected so that (mock) subclasses can replace it with a + * different statistics binding, if desired. + */ + protected void initializeStatisticsBinding() { + storageStatistics = createStorageStatistics( + requireNonNull(getIOStatistics())); + statisticsContext = new BondedS3AStatisticsContext( + new BondedS3AStatisticsContext.S3AFSStatisticsSource() { + + @Override + public S3AInstrumentation getInstrumentation() { + return S3AFileSystem.this.getInstrumentation(); + } + + @Override + public Statistics getInstanceStatistics() { + return S3AFileSystem.this.statistics; + } + }); + } + /** * Initialize the thread pool. * This must be re-invoked after replacing the S3Client during test @@ -531,13 +626,15 @@ private void initThreadPools(Configuration conf) { /** * Create the storage statistics or bind to an existing one. - * @return a storage statistics instance. + * @param ioStatistics IOStatistics to build the storage statistics from. + * @return a storage statistics instance; expected to be that of the FS. 
*/ - protected static S3AStorageStatistics createStorageStatistics() { + protected static S3AStorageStatistics createStorageStatistics( + final IOStatistics ioStatistics) { return (S3AStorageStatistics) GlobalStorageStatistics.INSTANCE .put(S3AStorageStatistics.NAME, - () -> new S3AStorageStatistics()); + () -> new S3AStorageStatistics(ioStatistics)); } /** @@ -576,10 +673,19 @@ protected void verifyBucketExistsV2() * Get S3A Instrumentation. For test purposes. * @return this instance's instrumentation. */ + @VisibleForTesting public S3AInstrumentation getInstrumentation() { return instrumentation; } + /** + * Get current listing instance. + * @return this instance's listing. + */ + public Listing getListing() { + return listing; + } + /** * Set up the client bindings. * If delegation tokens are enabled, the FS first looks for a DT @@ -635,8 +741,17 @@ private void bindAWSClient(URI name, boolean dtEnabled) throws IOException { S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL, S3ClientFactory.class); + S3ClientFactory.S3ClientCreationParameters parameters = null; + parameters = new S3ClientFactory.S3ClientCreationParameters() + .withCredentialSet(credentials) + .withEndpoint(conf.getTrimmed(ENDPOINT, DEFAULT_ENDPOINT)) + .withMetrics(statisticsContext.newStatisticsFromAwsSdk()) + .withPathStyleAccess(conf.getBoolean(PATH_STYLE_ACCESS, false)) + .withUserAgentSuffix(uaSuffix); + s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf) - .createS3Client(getUri(), bucket, credentials, uaSuffix); + .createS3Client(getUri(), + parameters); } /** @@ -1134,7 +1249,7 @@ private S3AReadOpContext createReadContext( invoker, s3guardInvoker, statistics, - instrumentation, + statisticsContext, fileStatus, seekPolicy, changePolicy, @@ -1231,15 +1346,19 @@ public FSDataOutputStream create(Path f, FsPermission permission, PutTracker putTracker = committerIntegration.createTracker(path, key); String destKey = putTracker.getDestKey(); + final BlockOutputStreamStatistics outputStreamStatistics + = statisticsContext.newOutputStreamStatistics(); return new FSDataOutputStream( new S3ABlockOutputStream(this, destKey, - new SemaphoredDelegatingExecutor(boundedThreadPool, - blockOutputActiveBlocks, true), + new SemaphoredDelegatingExecutor( + boundedThreadPool, + blockOutputActiveBlocks, + true), progress, partSize, blockFactory, - instrumentation.newOutputStreamStatistics(statistics), + outputStreamStatistics, getWriteOperationHelper(), putTracker), null); @@ -1265,7 +1384,7 @@ public WriteOperationHelper getWriteOperationHelper() { * is not a directory. 
*/ @Override - public FSDataOutputStream createNonRecursive(Path path, + public FSDataOutputStream createNonRecursive(Path p, FsPermission permission, EnumSet flags, int bufferSize, @@ -1273,10 +1392,22 @@ public FSDataOutputStream createNonRecursive(Path path, long blockSize, Progressable progress) throws IOException { entryPoint(INVOCATION_CREATE_NON_RECURSIVE); + final Path path = makeQualified(p); Path parent = path.getParent(); - if (parent != null) { - // expect this to raise an exception if there is no parent - if (!getFileStatus(parent).isDirectory()) { + // expect this to raise an exception if there is no parent dir + if (parent != null && !parent.isRoot()) { + S3AFileStatus status; + try { + // optimize for the directory existing: Call list first + status = innerGetFileStatus(parent, false, + StatusProbeEnum.DIRECTORIES); + } catch (FileNotFoundException e) { + // no dir, fall back to looking for a file + // (failure condition if true) + status = innerGetFileStatus(parent, false, + StatusProbeEnum.HEAD_ONLY); + } + if (!status.isDirectory()) { throw new FileAlreadyExistsException("Not a directory: " + parent); } } @@ -1333,9 +1464,6 @@ public boolean rename(Path src, Path dst) throws IOException { LOG.info("{}", e.getMessage()); LOG.debug("rename failure", e); return e.getExitCode(); - } catch (FileNotFoundException e) { - LOG.debug(e.toString()); - return false; } } @@ -1388,9 +1516,9 @@ private Pair initiateRename( // whether or not it can be the destination of the rename. if (srcStatus.isDirectory()) { if (dstStatus.isFile()) { - throw new RenameFailedException(src, dst, - "source is a directory and dest is a file") - .withExitCode(srcStatus.isFile()); + throw new FileAlreadyExistsException( + "Failed to rename " + src + " to " + dst + +"; source is a directory and dest is a file"); } else if (dstStatus.isEmptyDirectory() != Tristate.TRUE) { throw new RenameFailedException(src, dst, "Destination is a non-empty directory") @@ -1401,9 +1529,9 @@ private Pair initiateRename( // source is a file. The destination must be a directory, // empty or not if (dstStatus.isFile()) { - throw new RenameFailedException(src, dst, - "Cannot rename onto an existing file") - .withExitCode(false); + throw new FileAlreadyExistsException( + "Failed to rename " + src + " to " + dst + + "; destination file exists"); } } @@ -1411,17 +1539,27 @@ private Pair initiateRename( LOG.debug("rename: destination path {} not found", dst); // Parent must exist Path parent = dst.getParent(); - if (!pathToKey(parent).isEmpty()) { + if (!pathToKey(parent).isEmpty() + && !parent.equals(src.getParent())) { try { - S3AFileStatus dstParentStatus = innerGetFileStatus(dst.getParent(), - false, StatusProbeEnum.ALL); + // make sure parent isn't a file. + // don't look for parent being a dir as there is a risk + // of a race between dest dir cleanup and rename in different + // threads. + S3AFileStatus dstParentStatus = innerGetFileStatus(parent, + false, StatusProbeEnum.FILE); + // if this doesn't raise an exception then it's one of + // raw S3: parent is a file: error + // guarded S3: parent is a file or a dir. if (!dstParentStatus.isDirectory()) { throw new RenameFailedException(src, dst, "destination parent is not a directory"); } - } catch (FileNotFoundException e2) { - throw new RenameFailedException(src, dst, - "destination has no parent "); + } catch (FileNotFoundException expected) { + // nothing was found. 
Don't worry about it; + // expect rename to implicitly create the parent dir (raw S3) + // or the s3guard parents (guarded) + } } } @@ -1515,14 +1653,14 @@ public void deleteObjectAtPath(final Path path, final boolean isFile, final BulkOperationState operationState) throws IOException { - once("delete", key, () -> + once("delete", path.toString(), () -> S3AFileSystem.this.deleteObjectAtPath(path, key, isFile, operationState)); } @Override @Retries.RetryTranslated - public RemoteIterator listFilesAndEmptyDirectories( + public RemoteIterator listFilesAndDirectoryMarkers( final Path path, final S3AFileStatus status, final boolean collectTombstones, @@ -1565,7 +1703,9 @@ public void finishRename(final Path sourceRenamed, final Path destCreated) Path destParent = destCreated.getParent(); if (!sourceRenamed.getParent().equals(destParent)) { LOG.debug("source & dest parents are different; fix up dir markers"); - deleteUnnecessaryFakeDirectories(destParent); + if (!keepDirectoryMarkers(destParent)) { + deleteUnnecessaryFakeDirectories(destParent, null); + } maybeCreateFakeParentDirectory(sourceRenamed); } } @@ -1590,16 +1730,80 @@ public RemoteIterator listObjects( } } + protected class ListingOperationCallbacksImpl implements + ListingOperationCallbacks { + + @Override + @Retries.RetryRaw + public CompletableFuture listObjectsAsync( + S3ListRequest request, + DurationTrackerFactory trackerFactory) + throws IOException { + return submit(unboundedThreadPool, () -> + listObjects(request, + pairedTrackerFactory(trackerFactory, + getDurationTrackerFactory()))); + } + + @Override + @Retries.RetryRaw + public CompletableFuture continueListObjectsAsync( + S3ListRequest request, + S3ListResult prevResult, + DurationTrackerFactory trackerFactory) + throws IOException { + return submit(unboundedThreadPool, + () -> continueListObjects(request, prevResult, + pairedTrackerFactory(trackerFactory, + getDurationTrackerFactory()))); + } + + @Override + public S3ALocatedFileStatus toLocatedFileStatus( + S3AFileStatus status) + throws IOException { + return S3AFileSystem.this.toLocatedFileStatus(status); + } + + @Override + public S3ListRequest createListObjectsRequest( + String key, + String delimiter) { + return S3AFileSystem.this.createListObjectsRequest(key, delimiter); + } + + @Override + public long getDefaultBlockSize(Path path) { + return S3AFileSystem.this.getDefaultBlockSize(path); + } + + @Override + public int getMaxKeys() { + return S3AFileSystem.this.getMaxKeys(); + } + + @Override + public ITtlTimeProvider getUpdatedTtlTimeProvider() { + return S3AFileSystem.this.ttlTimeProvider; + } + + @Override + public boolean allowAuthoritative(final Path p) { + return S3AFileSystem.this.allowAuthoritative(p); + } + } + /** * Low-level call to get at the object metadata. - * @param path path to the object + * @param path path to the object. This will be qualified. * @return metadata * @throws IOException IO and object access problems. */ @VisibleForTesting @Retries.RetryTranslated public ObjectMetadata getObjectMetadata(Path path) throws IOException { - return getObjectMetadata(path, null, invoker, null); + return getObjectMetadata(makeQualified(path), null, invoker, + "getObjectMetadata"); } /** @@ -1611,31 +1815,17 @@ public ObjectMetadata getObjectMetadata(Path path) throws IOException { * @return metadata * @throws IOException IO and object access problems. 
*/ - @VisibleForTesting @Retries.RetryTranslated - public ObjectMetadata getObjectMetadata(Path path, + private ObjectMetadata getObjectMetadata(Path path, ChangeTracker changeTracker, Invoker changeInvoker, String operation) throws IOException { checkNotClosed(); - return once("getObjectMetadata", path.toString(), + String key = pathToKey(path); + return once(operation, path.toString(), () -> // this always does a full HEAD to the object getObjectMetadata( - pathToKey(path), changeTracker, changeInvoker, operation)); - } - - /** - * Get all the headers of the object of a path, if the object exists. - * @param path path to probe - * @return an immutable map of object headers. - * @throws IOException failure of the query - */ - @Retries.RetryTranslated - public Map getObjectHeaders(Path path) throws IOException { - LOG.debug("getObjectHeaders({})", path); - checkNotClosed(); - incrementReadOperations(); - return getObjectMetadata(path).getRawMetadata(); + key, changeTracker, changeInvoker, operation)); } /** @@ -1701,8 +1891,7 @@ protected void incrementStatistic(Statistic statistic) { * @param count the count to increment */ protected void incrementStatistic(Statistic statistic, long count) { - instrumentation.incrementCounter(statistic, count); - storageStatistics.incrementCounter(statistic, count); + statisticsContext.incrementCounter(statistic, count); } /** @@ -1711,7 +1900,7 @@ protected void incrementStatistic(Statistic statistic, long count) { * @param count the count to decrement */ protected void decrementGauge(Statistic statistic, long count) { - instrumentation.decrementGauge(statistic, count); + statisticsContext.decrementGauge(statistic, count); } /** @@ -1720,7 +1909,7 @@ protected void decrementGauge(Statistic statistic, long count) { * @param count the count to increment */ protected void incrementGauge(Statistic statistic, long count) { - instrumentation.incrementGauge(statistic, count); + statisticsContext.incrementGauge(statistic, count); } /** @@ -1733,6 +1922,7 @@ public void operationRetried(Exception ex) { if (isThrottleException(ex)) { operationThrottled(false); } else { + incrementStatistic(STORE_IO_RETRY); incrementStatistic(IGNORED_ERRORS); } } @@ -1784,11 +1974,11 @@ private void operationThrottled(boolean metastore) { LOG.debug("Request throttled on {}", metastore ? "S3": "DynamoDB"); if (metastore) { incrementStatistic(S3GUARD_METADATASTORE_THROTTLED); - instrumentation.addValueToQuantiles(S3GUARD_METADATASTORE_THROTTLE_RATE, + statisticsContext.addValueToQuantiles(S3GUARD_METADATASTORE_THROTTLE_RATE, 1); } else { incrementStatistic(STORE_IO_THROTTLED); - instrumentation.addValueToQuantiles(STORE_IO_THROTTLE_RATE, 1); + statisticsContext.addValueToQuantiles(STORE_IO_THROTTLE_RATE, 1); } } @@ -1801,6 +1991,27 @@ public S3AStorageStatistics getStorageStatistics() { return storageStatistics; } + /** + * Get the instrumentation's IOStatistics. + * @return statistics + */ + @Override + public IOStatistics getIOStatistics() { + return instrumentation != null + ? instrumentation.getIOStatistics() + : null; + } + + /** + * Get the factory for duration tracking. + * @return a factory from the instrumentation. + */ + protected DurationTrackerFactory getDurationTrackerFactory() { + return instrumentation != null ? + instrumentation.getDurationTrackerFactory() + : null; + } + /** * Request object metadata; increments counters in the process. * Retry policy: retry untranslated. 
@@ -1811,7 +2022,7 @@ public S3AStorageStatistics getStorageStatistics() { @Retries.RetryRaw @VisibleForTesting ObjectMetadata getObjectMetadata(String key) throws IOException { - return getObjectMetadata(key, null, invoker,null); + return getObjectMetadata(key, null, invoker, "getObjectMetadata"); } /** @@ -1838,15 +2049,30 @@ protected ObjectMetadata getObjectMetadata(String key, ObjectMetadata meta = changeInvoker.retryUntranslated("GET " + key, true, () -> { incrementStatistic(OBJECT_METADATA_REQUESTS); - LOG.debug("HEAD {} with change tracker {}", key, changeTracker); - if (changeTracker != null) { - changeTracker.maybeApplyConstraint(request); - } - ObjectMetadata objectMetadata = s3.getObjectMetadata(request); - if (changeTracker != null) { - changeTracker.processMetadata(objectMetadata, operation); + DurationTracker duration = getDurationTrackerFactory() + .trackDuration(ACTION_HTTP_HEAD_REQUEST.getSymbol()); + try { + LOG.debug("HEAD {} with change tracker {}", key, changeTracker); + if (changeTracker != null) { + changeTracker.maybeApplyConstraint(request); + } + ObjectMetadata objectMetadata = s3.getObjectMetadata(request); + if (changeTracker != null) { + changeTracker.processMetadata(objectMetadata, operation); + } + return objectMetadata; + } catch(AmazonServiceException ase) { + if (!isObjectNotFound(ase)) { + // file not found is not considered a failure of the call, + // so only switch the duration tracker to update failure + // metrics on other exception outcomes. + duration.failed(); + } + throw ase; + } finally { + // update the tracker. + duration.close(); } - return objectMetadata; }); incrementReadOperations(); return meta; @@ -1858,26 +2084,31 @@ protected ObjectMetadata getObjectMetadata(String key, * * Retry policy: retry untranslated. * @param request request to initiate + * @param trackerFactory duration tracking * @return the results * @throws IOException if the retry invocation raises one (it shouldn't). */ @Retries.RetryRaw - protected S3ListResult listObjects(S3ListRequest request) throws IOException { + protected S3ListResult listObjects(S3ListRequest request, + @Nullable final DurationTrackerFactory trackerFactory) + throws IOException { incrementReadOperations(); - incrementStatistic(OBJECT_LIST_REQUESTS); + LOG.debug("LIST {}", request); validateListArguments(request); try(DurationInfo ignored = new DurationInfo(LOG, false, "LIST")) { return invoker.retryUntranslated( request.toString(), true, - () -> { - if (useListV1) { - return S3ListResult.v1(s3.listObjects(request.getV1())); - } else { - return S3ListResult.v2(s3.listObjectsV2(request.getV2())); - } - }); + trackDurationOfOperation(trackerFactory, + OBJECT_LIST_REQUEST, + () -> { + if (useListV1) { + return S3ListResult.v1(s3.listObjects(request.getV1())); + } else { + return S3ListResult.v2(s3.listObjectsV2(request.getV2())); + } + })); } } @@ -1898,12 +2129,14 @@ private void validateListArguments(S3ListRequest request) { * Retry policy: retry untranslated. * @param request last list objects request to continue * @param prevResult last paged result to continue from + * @param trackerFactory duration tracking * @return the next result object * @throws IOException none, just there for retryUntranslated. 
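// An illustrative sketch of the duration-tracking idiom used in the HEAD and
// LIST paths above, written as a standalone helper for clarity; the library's
// IOStatisticsBinding methods (trackDurationOfOperation and friends) provide the
// real equivalents. The tracker is always closed, and failed() is invoked first
// on error so the invocation is recorded against the failure statistics rather
// than the success ones.
import java.util.concurrent.Callable;
import org.apache.hadoop.fs.statistics.DurationTracker;
import org.apache.hadoop.fs.statistics.DurationTrackerFactory;

final class TrackedInvocationSketch {
  private TrackedInvocationSketch() {
  }

  static <T> T track(DurationTrackerFactory factory, String statistic,
      Callable<T> operation) throws Exception {
    DurationTracker tracker = factory.trackDuration(statistic);
    try {
      return operation.call();
    } catch (Exception e) {
      tracker.failed();          // mark this invocation as failed
      throw e;
    } finally {
      tracker.close();           // always record the elapsed time
    }
  }
}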
*/ @Retries.RetryRaw protected S3ListResult continueListObjects(S3ListRequest request, - S3ListResult prevResult) throws IOException { + S3ListResult prevResult, + final DurationTrackerFactory trackerFactory) throws IOException { incrementReadOperations(); validateListArguments(request); try(DurationInfo ignored = @@ -1911,17 +2144,19 @@ protected S3ListResult continueListObjects(S3ListRequest request, return invoker.retryUntranslated( request.toString(), true, - () -> { - incrementStatistic(OBJECT_CONTINUE_LIST_REQUESTS); - if (useListV1) { - return S3ListResult.v1( - s3.listNextBatchOfObjects(prevResult.getV1())); - } else { - request.getV2().setContinuationToken(prevResult.getV2() - .getNextContinuationToken()); - return S3ListResult.v2(s3.listObjectsV2(request.getV2())); - } - }); + trackDurationOfOperation( + trackerFactory, + OBJECT_CONTINUE_LIST_REQUEST, + () -> { + if (useListV1) { + return S3ListResult.v1( + s3.listNextBatchOfObjects(prevResult.getV1())); + } else { + request.getV2().setContinuationToken(prevResult.getV2() + .getNextContinuationToken()); + return S3ListResult.v2(s3.listObjectsV2(request.getV2())); + } + })); } } @@ -1966,8 +2201,10 @@ protected void deleteObject(String key) invoker.retryUntranslated(String.format("Delete %s:/%s", bucket, key), DELETE_CONSIDERED_IDEMPOTENT, ()-> { - incrementStatistic(OBJECT_DELETE_REQUESTS); - s3.deleteObject(bucket, key); + incrementStatistic(OBJECT_DELETE_OBJECTS); + trackDurationOfInvocation(getDurationTrackerFactory(), + OBJECT_DELETE_REQUEST.getSymbol(), + () -> s3.deleteObject(bucket, key)); return null; }); } @@ -2013,9 +2250,14 @@ private void blockRootDelete(String key) throws InvalidRequestException { } /** - * Perform a bulk object delete operation. + * Perform a bulk object delete operation against S3; leaves S3Guard + * alone. * Increments the {@code OBJECT_DELETE_REQUESTS} and write - * operation statistics. + * operation statistics + *
+ * {@code OBJECT_DELETE_OBJECTS} is updated with the actual number + * of objects deleted in the request. + *
    * Retry policy: retry untranslated; delete considered idempotent. * If the request is throttled, this is logged in the throttle statistics, * with the counter set to the number of keys, rather than the number @@ -2036,19 +2278,22 @@ private DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteRequest) incrementWriteOperations(); BulkDeleteRetryHandler retryHandler = new BulkDeleteRetryHandler(createStoreContext()); + int keyCount = deleteRequest.getKeys().size(); try(DurationInfo ignored = new DurationInfo(LOG, false, "DELETE %d keys", - deleteRequest.getKeys().size())) { + keyCount)) { return invoker.retryUntranslated("delete", DELETE_CONSIDERED_IDEMPOTENT, (text, e, r, i) -> { // handle the failure retryHandler.bulkDeleteRetried(deleteRequest, e); }, - () -> { - incrementStatistic(OBJECT_DELETE_REQUESTS, 1); - return s3.deleteObjects(deleteRequest); - }); + // duration is tracked in the bulk delete counters + trackDurationOfOperation(getDurationTrackerFactory(), + OBJECT_BULK_DELETE_REQUEST.getSymbol(), () -> { + incrementStatistic(OBJECT_DELETE_OBJECTS, keyCount); + return s3.deleteObjects(deleteRequest); + })); } catch (MultiObjectDeleteException e) { // one or more of the keys could not be deleted. // log and rethrow @@ -2169,6 +2414,7 @@ public UploadInfo putObject(PutObjectRequest putObjectRequest) { * not be saved to the metadata store and * fs.s3a.metadatastore.fail.on.write.error=true */ + @VisibleForTesting @Retries.OnceRaw("For PUT; post-PUT actions are RetryTranslated") PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest) throws AmazonClientException, MetadataPersistenceException { @@ -2306,6 +2552,14 @@ private DeleteObjectsResult removeKeysS3( boolean quiet) throws MultiObjectDeleteException, AmazonClientException, IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Initiating delete operation for {} objects", + keysToDelete.size()); + for (DeleteObjectsRequest.KeyVersion key : keysToDelete) { + LOG.debug(" {} {}", key.getKey(), + key.getVersion() != null ? key.getVersion() : ""); + } + } DeleteObjectsResult result = null; if (keysToDelete.isEmpty()) { // exit fast if there are no keys to delete @@ -2415,7 +2669,8 @@ DeleteObjectsResult removeKeys( final boolean quiet) throws MultiObjectDeleteException, AmazonClientException, IOException { undeletedObjectsOnFailure.clear(); - try (DurationInfo ignored = new DurationInfo(LOG, false, "Deleting")) { + try (DurationInfo ignored = new DurationInfo(LOG, false, + "Deleting %d keys", keysToDelete.size())) { return removeKeysS3(keysToDelete, deleteFakeDir, quiet); } catch (MultiObjectDeleteException ex) { LOG.debug("Partial delete failure"); @@ -2426,8 +2681,8 @@ DeleteObjectsResult removeKeys( // entries so we only process these failures on "real" deletes. Triple, List, List>> results = new MultiObjectDeleteSupport(createStoreContext(), operationState) - .processDeleteFailure(ex, keysToDelete); - undeletedObjectsOnFailure.addAll(results.getMiddle()); + .processDeleteFailure(ex, keysToDelete, new ArrayList()); + undeletedObjectsOnFailure.addAll(results.getLeft()); } throw ex; } catch (AmazonClientException | IOException ex) { @@ -2498,7 +2753,7 @@ private void createFakeDirectoryIfNecessary(Path f) // we only make the LIST call; the codepaths to get here should not // be reached if there is an empty dir marker -and if they do, it // is mostly harmless to create a new one. 
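The hunks above route the HEAD, DELETE and bulk-delete calls through duration trackers so that failed requests are counted separately from successful ones (with a 404 on HEAD deliberately excluded from the failure count). As a rough sketch of that wrapping pattern, using plain Java and made-up names rather than the real Hadoop statistics classes:

import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.Callable;

/** Illustrative sketch only; not the Hadoop DurationTracker API. */
final class DurationRecorder {

  /** Run a callable, timing success and failure outcomes separately. */
  static <T> T trackDuration(String name, Callable<T> operation)
      throws Exception {
    Instant start = Instant.now();
    boolean failed = false;
    try {
      return operation.call();
    } catch (Exception e) {
      // the S3A change excludes "object not found" on HEAD from the
      // failure count; this sketch simply counts every exception
      failed = true;
      throw e;
    } finally {
      Duration elapsed = Duration.between(start, Instant.now());
      String key = failed ? name + ".failures" : name;
      System.out.printf("%s took %d ms%n", key, elapsed.toMillis());
    }
  }

  public static void main(String[] args) throws Exception {
    // hypothetical operation name; any callable can be wrapped
    int result = trackDuration("object_list_request", () -> 42);
    System.out.println("result = " + result);
  }
}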
- if (!key.isEmpty() && !s3Exists(f, EnumSet.of(StatusProbeEnum.List))) { + if (!key.isEmpty() && !s3Exists(f, StatusProbeEnum.DIRECTORIES)) { LOG.debug("Creating new fake directory at {}", f); createFakeDirectory(key); } @@ -2511,14 +2766,29 @@ private void createFakeDirectoryIfNecessary(Path f) * @throws IOException IO problem */ @Retries.RetryTranslated - void maybeCreateFakeParentDirectory(Path path) + @VisibleForTesting + protected void maybeCreateFakeParentDirectory(Path path) throws IOException, AmazonClientException { Path parent = path.getParent(); - if (parent != null) { + if (parent != null && !parent.isRoot()) { createFakeDirectoryIfNecessary(parent); } } + /** + * Override subclass such that we benefit for async listing done + * in {@code S3AFileSystem}. See {@code Listing#ObjectListingIterator}. + * {@inheritDoc} + * + */ + @Override + public RemoteIterator listStatusIterator(Path p) + throws FileNotFoundException, IOException { + RemoteIterator listStatusItr = once("listStatus", + p.toString(), () -> innerListStatus(p)); + return typeCastingRemoteIterator(listStatusItr); + } + /** * List the statuses of the files/directories in the given path if the path is * a directory. @@ -2530,7 +2800,9 @@ void maybeCreateFakeParentDirectory(Path path) */ public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException { - return once("listStatus", f.toString(), () -> innerListStatus(f)); + return once("listStatus", + f.toString(), + () -> iteratorToStatuses(innerListStatus(f), new HashSet<>())); } /** @@ -2543,50 +2815,52 @@ public FileStatus[] listStatus(Path f) throws FileNotFoundException, * @throws IOException due to an IO problem. * @throws AmazonClientException on failures inside the AWS SDK */ - public FileStatus[] innerListStatus(Path f) throws FileNotFoundException, - IOException, AmazonClientException { + private RemoteIterator innerListStatus(Path f) + throws FileNotFoundException, + IOException, AmazonClientException { Path path = qualify(f); - String key = pathToKey(path); LOG.debug("List status for path: {}", path); entryPoint(INVOCATION_LIST_STATUS); - List result; - final FileStatus fileStatus = getFileStatus(path); - - if (fileStatus.isDirectory()) { - if (!key.isEmpty()) { - key = key + '/'; - } - - boolean allowAuthoritative = allowAuthoritative(f); - DirListingMetadata dirMeta = - S3Guard.listChildrenWithTtl(metadataStore, path, ttlTimeProvider, - allowAuthoritative); - if (allowAuthoritative && dirMeta != null && dirMeta.isAuthoritative()) { - return S3Guard.dirMetaToStatuses(dirMeta); - } - - S3ListRequest request = createListObjectsRequest(key, "/"); - LOG.debug("listStatus: doing listObjects for directory {}", key); - - Listing.FileStatusListingIterator files = - listing.createFileStatusListingIterator(path, - request, - ACCEPT_ALL, - new Listing.AcceptAllButSelfAndS3nDirs(path)); - result = new ArrayList<>(files.getBatchSize()); - while (files.hasNext()) { - result.add(files.next()); + Triple, DirListingMetadata, Boolean> + statusesAssumingNonEmptyDir = listing + .getFileStatusesAssumingNonEmptyDir(path); + + if (!statusesAssumingNonEmptyDir.getLeft().hasNext() && + statusesAssumingNonEmptyDir.getRight()) { + // We are sure that this is an empty directory in auth mode. + return statusesAssumingNonEmptyDir.getLeft(); + } else if (!statusesAssumingNonEmptyDir.getLeft().hasNext()) { + // We may have an empty dir, or may have file or may have nothing. 
+ // So we call innerGetFileStatus to get the status, this may throw + // FileNotFoundException if we have nothing. + // So We are guaranteed to have either a dir marker or a file. + final S3AFileStatus fileStatus = innerGetFileStatus(path, false, + StatusProbeEnum.ALL); + // If it is a file return directly. + if (fileStatus.isFile()) { + LOG.debug("Adding: rd (not a dir): {}", path); + S3AFileStatus[] stats = new S3AFileStatus[1]; + stats[0] = fileStatus; + return listing.createProvidedFileStatusIterator( + stats, + ACCEPT_ALL, + Listing.ACCEPT_ALL_BUT_S3N); } - // merge the results. This will update the store as needed - return S3Guard.dirListingUnion(metadataStore, path, result, dirMeta, - allowAuthoritative, ttlTimeProvider); - } else { - LOG.debug("Adding: rd (not a dir): {}", path); - FileStatus[] stats = new FileStatus[1]; - stats[0]= fileStatus; - return stats; } + // Here we have a directory which may or may not be empty. + // So we update the metastore and return. + return S3Guard.dirListingUnion( + metadataStore, + path, + statusesAssumingNonEmptyDir.getLeft(), + statusesAssumingNonEmptyDir.getMiddle(), + allowAuthoritative(path), + ttlTimeProvider, p -> + listing.createProvidedFileStatusIterator( + dirMetaToStatuses(statusesAssumingNonEmptyDir.getMiddle()), + ACCEPT_ALL, + Listing.ACCEPT_ALL_BUT_S3N)); } /** @@ -2694,9 +2968,10 @@ public UserGroupInformation getOwner() { public boolean mkdirs(Path path, FsPermission permission) throws IOException, FileAlreadyExistsException { try { + entryPoint(INVOCATION_MKDIRS); return innerMkdirs(path, permission); } catch (AmazonClientException e) { - throw translateException("innerMkdirs", path, e); + throw translateException("mkdirs", path, e); } } @@ -2716,11 +2991,15 @@ private boolean innerMkdirs(Path p, FsPermission permission) throws IOException, FileAlreadyExistsException, AmazonClientException { Path f = qualify(p); LOG.debug("Making directory: {}", f); - entryPoint(INVOCATION_MKDIRS); + if (p.isRoot()) { + // fast exit for root. + return true; + } FileStatus fileStatus; try { - fileStatus = getFileStatus(f); + fileStatus = innerGetFileStatus(f, false, + StatusProbeEnum.ALL); if (fileStatus.isDirectory()) { return true; @@ -2730,7 +3009,7 @@ private boolean innerMkdirs(Path p, FsPermission permission) } catch (FileNotFoundException e) { // Walk path to root, ensuring closest ancestor is a directory, not file Path fPart = f.getParent(); - while (fPart != null) { + while (fPart != null && !fPart.isRoot()) { try { fileStatus = getFileStatus(fPart); if (fileStatus.isDirectory()) { @@ -2791,7 +3070,8 @@ S3AFileStatus innerGetFileStatus(final Path f, final Set probes) throws IOException { final Path path = qualify(f); String key = pathToKey(path); - LOG.debug("Getting path status for {} ({})", path, key); + LOG.debug("Getting path status for {} ({}); needEmptyDirectory={}", + path, key, needEmptyDirectoryFlag); boolean allowAuthoritative = allowAuthoritative(path); // Check MetadataStore, if any. 
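The listStatusIterator/listStatus changes above move directory listing onto an incremental RemoteIterator, so callers no longer need the whole listing materialised up front and can overlap processing with the async listing mentioned in the javadoc. A minimal consumer of that iterator through the public FileSystem API (the bucket and prefix below are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExample {
  public static void main(String[] args) throws Exception {
    // hypothetical bucket and prefix; any S3A URI works the same way
    Path dir = new Path("s3a://example-bucket/data/");
    try (FileSystem fs = FileSystem.newInstance(dir.toUri(), new Configuration())) {
      RemoteIterator<FileStatus> entries = fs.listStatusIterator(dir);
      while (entries.hasNext()) {
        FileStatus status = entries.next();
        System.out.println(status.getPath() + " " + status.getLen());
      }
    }
  }
}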
@@ -2802,9 +3082,10 @@ S3AFileStatus innerGetFileStatus(final Path f, } Set tombstones = Collections.emptySet(); if (pm != null) { + S3AFileStatus msStatus = pm.getFileStatus(); if (pm.isDeleted()) { OffsetDateTime deletedAt = OffsetDateTime.ofInstant( - Instant.ofEpochMilli(pm.getFileStatus().getModificationTime()), + Instant.ofEpochMilli(msStatus.getModificationTime()), ZoneOffset.UTC); throw new FileNotFoundException("Path " + path + " is recorded as " + "deleted by S3Guard at " + deletedAt); @@ -2815,7 +3096,7 @@ S3AFileStatus innerGetFileStatus(final Path f, // Skip going to s3 if the file checked is a directory. Because if the // dest is also a directory, there's no difference. - if (!pm.getFileStatus().isDirectory() && + if (!msStatus.isDirectory() && !allowAuthoritative && probes.contains(StatusProbeEnum.Head)) { // a file has been found in a non-auth path and the caller has not said @@ -2825,8 +3106,13 @@ S3AFileStatus innerGetFileStatus(final Path f, S3AFileStatus s3AFileStatus; try { - s3AFileStatus = s3GetFileStatus(path, key, probes, tombstones); + s3AFileStatus = s3GetFileStatus(path, + key, + probes, + tombstones, + needEmptyDirectoryFlag); } catch (FileNotFoundException fne) { + LOG.trace("File Not Found from probes for {}", key, fne); s3AFileStatus = null; } if (s3AFileStatus == null) { @@ -2845,42 +3131,55 @@ S3AFileStatus innerGetFileStatus(final Path f, } } - S3AFileStatus msStatus = pm.getFileStatus(); if (needEmptyDirectoryFlag && msStatus.isDirectory()) { + // the caller needs to know if a directory is empty, + // and that this is a directory. if (pm.isEmptyDirectory() != Tristate.UNKNOWN) { // We have a definitive true / false from MetadataStore, we are done. return msStatus; } else { + // execute a S3Guard listChildren command to list tombstones under the + // path. + // This list will be used in the forthcoming s3GetFileStatus call. DirListingMetadata children = S3Guard.listChildrenWithTtl(metadataStore, path, ttlTimeProvider, allowAuthoritative); if (children != null) { tombstones = children.listTombstones(); } - LOG.debug("MetadataStore doesn't know if dir is empty, using S3."); + LOG.debug("MetadataStore doesn't know if {} is empty, using S3.", + path); } } else { // Either this is not a directory, or we don't care if it is empty return msStatus; } - // If the metadata store has no children for it and it's not listed in - // S3 yet, we'll assume the empty directory is true; - S3AFileStatus s3FileStatus; + // now issue the S3 getFileStatus call. try { - s3FileStatus = s3GetFileStatus(path, key, probes, tombstones); + S3AFileStatus s3FileStatus = s3GetFileStatus(path, + key, + probes, + tombstones, + true); + // entry was found, so save in S3Guard and return the final value. + return S3Guard.putAndReturn(metadataStore, s3FileStatus, + ttlTimeProvider); } catch (FileNotFoundException e) { + // If the metadata store has no children for it and it's not listed in + // S3 yet, we'll conclude that it is an empty directory return S3AFileStatus.fromFileStatus(msStatus, Tristate.TRUE, null, null); } - // entry was found, save in S3Guard - return S3Guard.putAndReturn(metadataStore, s3FileStatus, - ttlTimeProvider); } else { // there was no entry in S3Guard // retrieve the data and update the metadata store in the process. 
return S3Guard.putAndReturn(metadataStore, - s3GetFileStatus(path, key, probes, tombstones), + s3GetFileStatus(path, + key, + probes, + tombstones, + needEmptyDirectoryFlag), ttlTimeProvider); } } @@ -2935,6 +3234,8 @@ S3AFileStatus innerGetFileStatus(final Path f, * @param key Key string for the path * @param probes probes to make * @param tombstones tombstones to filter + * @param needEmptyDirectoryFlag if true, implementation will calculate + * a TRUE or FALSE value for {@link S3AFileStatus#isEmptyDirectory()} * @return Status * @throws FileNotFoundException the supplied probes failed. * @throws IOException on other problems. @@ -2944,88 +3245,92 @@ S3AFileStatus innerGetFileStatus(final Path f, S3AFileStatus s3GetFileStatus(final Path path, final String key, final Set probes, - @Nullable Set tombstones) throws IOException { - if (!key.isEmpty()) { - if (probes.contains(StatusProbeEnum.Head) && !key.endsWith("/")) { - try { - // look for the simple file - ObjectMetadata meta = getObjectMetadata(key); - LOG.debug("Found exact file: normal file {}", key); - return new S3AFileStatus(meta.getContentLength(), - dateToLong(meta.getLastModified()), - path, - getDefaultBlockSize(path), - username, - meta.getETag(), - meta.getVersionId()); - } catch (AmazonServiceException e) { - // if the response is a 404 error, it just means that there is - // no file at that path...the remaining checks will be needed. - if (e.getStatusCode() != SC_404 || isUnknownBucket(e)) { - throw translateException("getFileStatus", path, e); - } - } catch (AmazonClientException e) { + @Nullable final Set tombstones, + final boolean needEmptyDirectoryFlag) throws IOException { + LOG.debug("S3GetFileStatus {}", path); + // either you aren't looking for the directory flag, or you are, + // and if you are, the probe list must contain list. + Preconditions.checkArgument(!needEmptyDirectoryFlag + || probes.contains(StatusProbeEnum.List), + "s3GetFileStatus(%s) wants to know if a directory is empty but" + + " does not request a list probe", path); + + if (key.isEmpty() && !needEmptyDirectoryFlag) { + return new S3AFileStatus(Tristate.UNKNOWN, path, username); + } + + if (!key.isEmpty() && !key.endsWith("/") + && probes.contains(StatusProbeEnum.Head)) { + try { + // look for the simple file + ObjectMetadata meta = getObjectMetadata(key); + LOG.debug("Found exact file: normal file {}", key); + return new S3AFileStatus(meta.getContentLength(), + dateToLong(meta.getLastModified()), + path, + getDefaultBlockSize(path), + username, + meta.getETag(), + meta.getVersionId()); + } catch (AmazonServiceException e) { + // if the response is a 404 error, it just means that there is + // no file at that path...the remaining checks will be needed. + if (e.getStatusCode() != SC_404 || isUnknownBucket(e)) { throw translateException("getFileStatus", path, e); } - } - - // Either a normal file was not found or the probe was skipped. - // because the key ended in "/" or it was not in the set of probes. - // Look for the dir marker - if (probes.contains(StatusProbeEnum.DirMarker)) { - String newKey = maybeAddTrailingSlash(key); - try { - ObjectMetadata meta = getObjectMetadata(newKey); - - if (objectRepresentsDirectory(newKey, meta.getContentLength())) { - LOG.debug("Found file (with /): fake directory"); - return new S3AFileStatus(Tristate.TRUE, path, username); - } else { - LOG.warn("Found file (with /): real file? 
should not happen: {}", - key); - - return new S3AFileStatus(meta.getContentLength(), - dateToLong(meta.getLastModified()), - path, - getDefaultBlockSize(path), - username, - meta.getETag(), - meta.getVersionId()); - } - } catch (AmazonServiceException e) { - if (e.getStatusCode() != SC_404 || isUnknownBucket(e)) { - throw translateException("getFileStatus", newKey, e); - } - } catch (AmazonClientException e) { - throw translateException("getFileStatus", newKey, e); - } + } catch (AmazonClientException e) { + throw translateException("getFileStatus", path, e); } } // execute the list if (probes.contains(StatusProbeEnum.List)) { try { + // this will find a marker dir / as well as an entry. + // When making a simple "is this a dir check" all is good. + // but when looking for an empty dir, we need to verify there are no + // children, so ask for two entries, so as to find + // a child String dirKey = maybeAddTrailingSlash(key); - S3ListRequest request = createListObjectsRequest(dirKey, "/", 1); - - S3ListResult objects = listObjects(request); + // list size is dir marker + at least one non-tombstone entry + // there's a corner case: more tombstones than you have in a + // single page list. We assume that if you have been deleting + // that many files, then the AWS listing will have purged some + // by the time of listing so that the response includes some + // which have not. + + int listSize; + if (tombstones == null) { + // no tombstones so look for a marker and at least one child. + listSize = 2; + } else { + // build a listing > tombstones. If the caller has many thousands + // of tombstones this won't work properly, which is why pruning + // of expired tombstones matters. + listSize = Math.min(2 + tombstones.size(), Math.max(2, maxKeys)); + } + S3ListRequest request = createListObjectsRequest(dirKey, "/", + listSize); + // execute the request + S3ListResult listResult = listObjects(request, + getDurationTrackerFactory()); - Collection prefixes = objects.getCommonPrefixes(); - Collection summaries = objects.getObjectSummaries(); - if (!isEmptyOfKeys(prefixes, tombstones) || - !isEmptyOfObjects(summaries, tombstones)) { + if (listResult.hasPrefixesOrObjects(contextAccessors, tombstones)) { if (LOG.isDebugEnabled()) { - LOG.debug("Found path as directory (with /): {}/{}", - prefixes.size(), summaries.size()); - - for (S3ObjectSummary summary : summaries) { - LOG.debug("Summary: {} {}", summary.getKey(), summary.getSize()); - } - for (String prefix : prefixes) { - LOG.debug("Prefix: {}", prefix); - } + LOG.debug("Found path as directory (with /)"); + listResult.logAtDebug(LOG); } - + // At least one entry has been found. + // If looking for an empty directory, the marker must exist but no + // children. + // So the listing must contain the marker entry only. + if (needEmptyDirectoryFlag + && listResult.representsEmptyDirectory( + contextAccessors, dirKey, tombstones)) { + return new S3AFileStatus(Tristate.TRUE, path, username); + } + // either an empty directory is not needed, or the + // listing does not meet the requirements. return new S3AFileStatus(Tristate.FALSE, path, username); } else if (key.isEmpty()) { LOG.debug("Found root directory"); @@ -3044,48 +3349,6 @@ S3AFileStatus s3GetFileStatus(final Path path, throw new FileNotFoundException("No such file or directory: " + path); } - /** - * Helper function to determine if a collection of paths is empty - * after accounting for tombstone markers (if provided). - * @param keys Collection of path (prefixes / directories or keys). 
- * @param tombstones Set of tombstone markers, or null if not applicable. - * @return false if summaries contains objects not accounted for by - * tombstones. - */ - private boolean isEmptyOfKeys(Collection keys, Set - tombstones) { - if (tombstones == null) { - return keys.isEmpty(); - } - for (String key : keys) { - Path qualified = keyToQualifiedPath(key); - if (!tombstones.contains(qualified)) { - return false; - } - } - return true; - } - - /** - * Helper function to determine if a collection of object summaries is empty - * after accounting for tombstone markers (if provided). - * @param summaries Collection of objects as returned by listObjects. - * @param tombstones Set of tombstone markers, or null if not applicable. - * @return false if summaries contains objects not accounted for by - * tombstones. - */ - private boolean isEmptyOfObjects(Collection summaries, - Set tombstones) { - if (tombstones == null) { - return summaries.isEmpty(); - } - Collection stringCollection = new ArrayList<>(summaries.size()); - for (S3ObjectSummary summary : summaries) { - stringCollection.add(summary.getKey()); - } - return isEmptyOfKeys(stringCollection, tombstones); - } - /** * Raw version of {@link FileSystem#exists(Path)} which uses S3 only: * S3Guard MetadataStore, if any, will be skipped. @@ -3100,7 +3363,7 @@ private boolean s3Exists(final Path path, final Set probes) throws IOException { String key = pathToKey(path); try { - s3GetFileStatus(path, key, probes, null); + s3GetFileStatus(path, key, probes, null, false); return true; } catch (FileNotFoundException e) { return false; @@ -3372,6 +3635,25 @@ public Token getDelegationToken(String renewer) } } + /** + * Ask any DT plugin for any extra token issuers. + * These do not get told of the encryption secrets and can + * return any type of token. + * This allows DT plugins to issue extra tokens for + * ancillary services. + */ + @Override + public DelegationTokenIssuer[] getAdditionalTokenIssuers() + throws IOException { + if (delegationTokens.isPresent()) { + return delegationTokens.get().getAdditionalTokenIssuers(); + } else { + // Delegation token support is not set up + LOG.debug("Token support is not enabled"); + return null; + } + } + /** * Build the AWS policy for restricted access to the resources needed * by this bucket. @@ -3436,8 +3718,9 @@ private CopyResult copyFile(String srcKey, String dstKey, long size, ChangeTracker changeTracker = new ChangeTracker( keyToQualifiedPath(srcKey).toString(), changeDetectionPolicy, - readContext.instrumentation.newInputStreamStatistics() - .getVersionMismatchCounter(), + readContext.getS3AStatisticsContext() + .newInputStreamStatistics() + .getChangeTrackerStatistics(), srcAttributes); String action = "copyFile(" + srcKey + ", " + dstKey + ")"; @@ -3484,6 +3767,7 @@ private CopyResult copyFile(String srcKey, String dstKey, long size, copyObjectRequest.setNewObjectMetadata(dstom); Optional.ofNullable(srcom.getStorageClass()) .ifPresent(copyObjectRequest::setStorageClass); + incrementStatistic(OBJECT_COPY_REQUESTS); Copy copy = transfers.copy(copyObjectRequest); copy.addProgressListener(progressListener); CopyOutcome copyOutcome = CopyOutcome.waitForCopy(copy); @@ -3617,16 +3901,21 @@ private Optional generateSSECustomerKey() { /** * Perform post-write actions. - * Calls {@link #deleteUnnecessaryFakeDirectories(Path)} and then - * updates any metastore. + *
<p></p>
 * This operation MUST be called after any PUT/multipart PUT completes
 * successfully.
- *
- * The operations actions include
+ * <p></p>
+ * The actions include:
 * <ol>
- *   <li>Calling {@link #deleteUnnecessaryFakeDirectories(Path)}</li>
- *   <li>Updating any metadata store with details on the newly created
- *   object.</li>
+ *   <li>
+ *     Calling
+ *     {@link #deleteUnnecessaryFakeDirectories(Path, BulkOperationState)}
+ *     if directory markers are not being retained.
+ *   </li>
+ *   <li>
+ *     Updating any metadata store with details on the newly created
+ *     object.
+ *   </li>
 * </ol>
    * @param key key written to * @param length total length of file written @@ -3649,12 +3938,19 @@ void finishedWrite(String key, long length, String eTag, String versionId, Preconditions.checkArgument(length >= 0, "content length is negative"); final boolean isDir = objectRepresentsDirectory(key, length); // kick off an async delete - final CompletableFuture deletion = submit( - unboundedThreadPool, - () -> { - deleteUnnecessaryFakeDirectories(p.getParent()); - return null; - }); + CompletableFuture deletion; + if (!keepDirectoryMarkers(p)) { + deletion = submit( + unboundedThreadPool, + () -> { + deleteUnnecessaryFakeDirectories( + p.getParent(), + operationState); + return null; + }); + } else { + deletion = null; + } // this is only set if there is a metastore to update and the // operationState parameter passed in was null. BulkOperationState stateToClose = null; @@ -3713,13 +4009,26 @@ void finishedWrite(String key, long length, String eTag, String versionId, } } + /** + * Should we keep directory markers under the path being created + * by mkdir/file creation/rename? + * @param path path to probe + * @return true if the markers MAY be retained, + * false if they MUST be deleted + */ + private boolean keepDirectoryMarkers(Path path) { + return directoryPolicy.keepDirectoryMarkers(path); + } + /** * Delete mock parent directories which are no longer needed. * Retry policy: retrying; exceptions swallowed. * @param path path + * @param operationState (nullable) operational state for a bulk update */ @Retries.RetryExceptionsSwallowed - private void deleteUnnecessaryFakeDirectories(Path path) { + private void deleteUnnecessaryFakeDirectories(Path path, + final BulkOperationState operationState) { List keysToRemove = new ArrayList<>(); while (!path.isRoot()) { String key = pathToKey(path); @@ -3729,7 +4038,7 @@ private void deleteUnnecessaryFakeDirectories(Path path) { path = path.getParent(); } try { - removeKeys(keysToRemove, true, null); + removeKeys(keysToRemove, true, operationState); } catch(AmazonClientException | IOException e) { instrumentation.errorIgnored(); if (LOG.isDebugEnabled()) { @@ -3792,59 +4101,8 @@ public int read() throws IOException { * @return a copy of {@link ObjectMetadata} with only relevant attributes */ private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) { - // This approach may be too brittle, especially if - // in future there are new attributes added to ObjectMetadata - // that we do not explicitly call to set here ObjectMetadata ret = newObjectMetadata(source.getContentLength()); - - // Possibly null attributes - // Allowing nulls to pass breaks it during later use - if (source.getCacheControl() != null) { - ret.setCacheControl(source.getCacheControl()); - } - if (source.getContentDisposition() != null) { - ret.setContentDisposition(source.getContentDisposition()); - } - if (source.getContentEncoding() != null) { - ret.setContentEncoding(source.getContentEncoding()); - } - if (source.getContentMD5() != null) { - ret.setContentMD5(source.getContentMD5()); - } - if (source.getContentType() != null) { - ret.setContentType(source.getContentType()); - } - if (source.getExpirationTime() != null) { - ret.setExpirationTime(source.getExpirationTime()); - } - if (source.getExpirationTimeRuleId() != null) { - ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId()); - } - if (source.getHttpExpiresDate() != null) { - ret.setHttpExpiresDate(source.getHttpExpiresDate()); - } - if (source.getLastModified() != null) { - 
ret.setLastModified(source.getLastModified()); - } - if (source.getOngoingRestore() != null) { - ret.setOngoingRestore(source.getOngoingRestore()); - } - if (source.getRestoreExpirationTime() != null) { - ret.setRestoreExpirationTime(source.getRestoreExpirationTime()); - } - if (source.getSSEAlgorithm() != null) { - ret.setSSEAlgorithm(source.getSSEAlgorithm()); - } - if (source.getSSECustomerAlgorithm() != null) { - ret.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm()); - } - if (source.getSSECustomerKeyMd5() != null) { - ret.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5()); - } - - for (Map.Entry e : source.getUserMetadata().entrySet()) { - ret.addUserMetadata(e.getKey(), e.getValue()); - } + getHeaderProcessing().cloneObjectMetadata(source, ret); return ret; } @@ -3858,6 +4116,14 @@ public long getDefaultBlockSize() { return getConf().getLongBytes(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE); } + /** + * Get the directory marker policy of this filesystem. + * @return the marker policy. + */ + public DirectoryPolicy getDirectoryMarkerPolicy() { + return directoryPolicy; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder( @@ -3896,12 +4162,11 @@ public String toString() { sb.append(", credentials=").append(credentials); sb.append(", delegation tokens=") .append(delegationTokens.map(Objects::toString).orElse("disabled")); - sb.append(", statistics {") - .append(statistics) - .append("}"); - if (instrumentation != null) { - sb.append(", metrics {") - .append(instrumentation.dump("{", "=", "} ", true)) + sb.append(", ").append(directoryPolicy); + // if logging at debug, toString returns the entire IOStatistics set. + if (getInstrumentation() != null) { + sb.append(", instrumentation {") + .append(getInstrumentation().toString()) .append("}"); } sb.append('}'); @@ -3952,6 +4217,8 @@ public boolean isMagicCommitPath(Path path) { /** * Increments the statistic {@link Statistic#INVOCATION_GLOB_STATUS}. + * Override superclass so as to disable symlink resolution as symlinks + * are not supported by S3A. * {@inheritDoc} */ @Override @@ -3960,9 +4227,9 @@ public FileStatus[] globStatus(Path pathPattern) throws IOException { } /** - * Override superclass so as to disable symlink resolution and so avoid - * some calls to the FS which may have problems when the store is being - * inconsistent. + * Increments the statistic {@link Statistic#INVOCATION_GLOB_STATUS}. + * Override superclass so as to disable symlink resolution as symlinks + * are not supported by S3A. * {@inheritDoc} */ @Override @@ -3974,7 +4241,7 @@ public FileStatus[] globStatus( return Globber.createGlobber(this) .withPathPattern(pathPattern) .withPathFiltern(filter) - .withResolveSymlinks(true) + .withResolveSymlinks(false) .build() .glob(); } @@ -3990,25 +4257,41 @@ public boolean exists(Path f) throws IOException { } /** - * Override superclass so as to add statistic collection. + * Optimized probe for a path referencing a dir. + * Even though it is optimized to a single HEAD, applications + * should not over-use this method...it is all too common. * {@inheritDoc} */ @Override @SuppressWarnings("deprecation") public boolean isDirectory(Path f) throws IOException { entryPoint(INVOCATION_IS_DIRECTORY); - return super.isDirectory(f); + try { + return innerGetFileStatus(f, false, StatusProbeEnum.DIRECTORIES) + .isDirectory(); + } catch (FileNotFoundException e) { + // not found or it is a file. + return false; + } } /** - * Override superclass so as to add statistic collection. 
+ * Optimized probe for a path referencing a file. + * Even though it is optimized to a single HEAD, applications + * should not over-use this method...it is all too common. * {@inheritDoc} */ @Override @SuppressWarnings("deprecation") public boolean isFile(Path f) throws IOException { entryPoint(INVOCATION_IS_FILE); - return super.isFile(f); + try { + return innerGetFileStatus(f, false, StatusProbeEnum.HEAD_ONLY) + .isFile(); + } catch (FileNotFoundException e) { + // not found or it is a dir. + return false; + } } /** @@ -4050,6 +4333,37 @@ public EtagChecksum getFileChecksum(Path f, final long length) } } + /** + * Get header processing support. + * @return the header processing of this instance. + */ + private HeaderProcessing getHeaderProcessing() { + return headerProcessing; + } + + @Override + public byte[] getXAttr(final Path path, final String name) + throws IOException { + return getHeaderProcessing().getXAttr(path, name); + } + + @Override + public Map getXAttrs(final Path path) throws IOException { + return getHeaderProcessing().getXAttrs(path); + } + + @Override + public Map getXAttrs(final Path path, + final List names) + throws IOException { + return getHeaderProcessing().getXAttrs(path, names); + } + + @Override + public List listXAttrs(final Path path) throws IOException { + return getHeaderProcessing().listXAttrs(path); + } + /** * {@inheritDoc}. * @@ -4083,21 +4397,6 @@ public RemoteIterator listFiles(Path f, new Listing.AcceptFilesOnly(qualify(f)), null, true, false)); } - private static RemoteIterator toLocatedFileStatusIterator( - RemoteIterator iterator) { - return new RemoteIterator() { - @Override - public boolean hasNext() throws IOException { - return iterator.hasNext(); - } - - @Override - public LocatedFileStatus next() throws IOException { - return iterator.next(); - } - }; - } - /** * Recursive List of files and empty directories. * @param f path to list from @@ -4176,75 +4475,38 @@ private RemoteIterator innerListFiles( Path path = qualify(f); LOG.debug("listFiles({}, {})", path, recursive); try { - // if a status was given, that is used, otherwise - // call getFileStatus, which triggers an existence check - final S3AFileStatus fileStatus = status != null - ? status - : (S3AFileStatus) getFileStatus(path); - if (fileStatus.isFile()) { + // if a status was given and it is a file. + if (status != null && status.isFile()) { // simple case: File - LOG.debug("Path is a file"); - return new Listing.SingleStatusRemoteIterator( - toLocatedFileStatus(fileStatus)); - } else { - // directory: do a bulk operation - String key = maybeAddTrailingSlash(pathToKey(path)); - String delimiter = recursive ? null : "/"; - LOG.debug("Requesting all entries under {} with delimiter '{}'", - key, delimiter); - final RemoteIterator cachedFilesIterator; - final Set tombstones; - boolean allowAuthoritative = allowAuthoritative(f); - if (recursive) { - final PathMetadata pm = metadataStore.get(path, true); - // shouldn't need to check pm.isDeleted() because that will have - // been caught by getFileStatus above. 
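The getXAttr/getXAttrs/listXAttrs overrides above expose object metadata through the standard extended-attribute API, delegating to the new HeaderProcessing class. A hedged sketch of reading those attributes from client code; the exact attribute names S3A surfaces are not shown in this hunk, so the sketch just lists whatever the store reports (the URI below is made up):

import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrExample {
  public static void main(String[] args) throws Exception {
    // hypothetical object; the attribute names come from the store itself
    Path file = new Path("s3a://example-bucket/data/part-0000.csv");
    try (FileSystem fs = FileSystem.newInstance(file.toUri(), new Configuration())) {
      for (String name : fs.listXAttrs(file)) {
        byte[] value = fs.getXAttr(file, name);
        System.out.println(name + " = "
            + (value == null ? "" : new String(value, StandardCharsets.UTF_8)));
      }
      // bulk form: every attribute in a single call
      Map<String, byte[]> all = fs.getXAttrs(file);
      System.out.println("attribute count: " + all.size());
    }
  }
}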
- MetadataStoreListFilesIterator metadataStoreListFilesIterator = - new MetadataStoreListFilesIterator(metadataStore, pm, - allowAuthoritative); - tombstones = metadataStoreListFilesIterator.listTombstones(); - // if all of the below is true - // - authoritative access is allowed for this metadatastore for this directory, - // - all the directory listings are authoritative on the client - // - the caller does not force non-authoritative access - // return the listing without any further s3 access - if (!forceNonAuthoritativeMS && - allowAuthoritative && - metadataStoreListFilesIterator.isRecursivelyAuthoritative()) { - S3AFileStatus[] statuses = S3Guard.iteratorToStatuses( - metadataStoreListFilesIterator, tombstones); - cachedFilesIterator = listing.createProvidedFileStatusIterator( - statuses, ACCEPT_ALL, acceptor); - return listing.createLocatedFileStatusIterator(cachedFilesIterator); - } - cachedFilesIterator = metadataStoreListFilesIterator; - } else { - DirListingMetadata meta = - S3Guard.listChildrenWithTtl(metadataStore, path, ttlTimeProvider, - allowAuthoritative); - if (meta != null) { - tombstones = meta.listTombstones(); - } else { - tombstones = null; - } - cachedFilesIterator = listing.createProvidedFileStatusIterator( - S3Guard.dirMetaToStatuses(meta), ACCEPT_ALL, acceptor); - if (allowAuthoritative && meta != null && meta.isAuthoritative()) { - // metadata listing is authoritative, so return it directly - return listing.createLocatedFileStatusIterator(cachedFilesIterator); - } + LOG.debug("Path is a file: {}", path); + return listing.createSingleStatusIterator( + toLocatedFileStatus(status)); + } + // Assuming the path to be a directory + // do a bulk operation. + RemoteIterator listFilesAssumingDir = + listing.getListFilesAssumingDir(path, + recursive, + acceptor, + collectTombstones, + forceNonAuthoritativeMS); + // If there are no list entries present, we + // fallback to file existence check as the path + // can be a file or empty directory. + if (!listFilesAssumingDir.hasNext()) { + // If file status was already passed, reuse it. + final S3AFileStatus fileStatus = status != null + ? status + : (S3AFileStatus) getFileStatus(path); + if (fileStatus.isFile()) { + return listing.createSingleStatusIterator( + toLocatedFileStatus(fileStatus)); } - return listing.createTombstoneReconcilingIterator( - listing.createLocatedFileStatusIterator( - listing.createFileStatusListingIterator(path, - createListObjectsRequest(key, delimiter), - ACCEPT_ALL, - acceptor, - cachedFilesIterator)), - collectTombstones ? tombstones : null); } + // If we have reached here, it means either there are files + // in this directory or it is empty. + return listFilesAssumingDir; } catch (AmazonClientException e) { - // TODO S3Guard: retry on file not found exception throw translateException("listFiles", path, e); } } @@ -4283,41 +4545,68 @@ public RemoteIterator listLocatedStatus(final Path f, RemoteIterator iterator = once("listLocatedStatus", path.toString(), () -> { - // lookup dir triggers existence check - final S3AFileStatus fileStatus = - (S3AFileStatus) getFileStatus(path); - if (fileStatus.isFile()) { - // simple case: File - LOG.debug("Path is a file"); - return new Listing.SingleStatusRemoteIterator( - filter.accept(path) ? 
toLocatedFileStatus(fileStatus) : null); - } else { - // directory: trigger a lookup - final String key = maybeAddTrailingSlash(pathToKey(path)); - final Listing.FileStatusAcceptor acceptor = - new Listing.AcceptAllButSelfAndS3nDirs(path); - boolean allowAuthoritative = allowAuthoritative(f); - DirListingMetadata meta = - S3Guard.listChildrenWithTtl(metadataStore, path, - ttlTimeProvider, allowAuthoritative); - final RemoteIterator cachedFileStatusIterator = - listing.createProvidedFileStatusIterator( - S3Guard.dirMetaToStatuses(meta), filter, acceptor); - return (allowAuthoritative && meta != null - && meta.isAuthoritative()) - ? listing.createLocatedFileStatusIterator( - cachedFileStatusIterator) - : listing.createLocatedFileStatusIterator( - listing.createFileStatusListingIterator(path, - createListObjectsRequest(key, "/"), - filter, - acceptor, - cachedFileStatusIterator)); + // Assuming the path to be a directory, + // trigger a list call directly. + final RemoteIterator + locatedFileStatusIteratorForDir = + listing.getLocatedFileStatusIteratorForDir(path, filter); + + // If no listing is present then path might be a file. + if (!locatedFileStatusIteratorForDir.hasNext()) { + final S3AFileStatus fileStatus = + (S3AFileStatus) getFileStatus(path); + if (fileStatus.isFile()) { + // simple case: File + LOG.debug("Path is a file"); + return listing.createSingleStatusIterator( + filter.accept(path) + ? toLocatedFileStatus(fileStatus) + : null); + } } + // Either empty or non-empty directory. + return locatedFileStatusIteratorForDir; }); return toLocatedFileStatusIterator(iterator); } + /** + * Generate list located status for a directory. + * Also performing tombstone reconciliation for guarded directories. + * @param dir directory to check. + * @param filter a path filter. + * @return an iterator that traverses statuses of the given dir. + * @throws IOException in case of failure. + */ + private RemoteIterator getLocatedFileStatusIteratorForDir( + Path dir, PathFilter filter) throws IOException { + final String key = maybeAddTrailingSlash(pathToKey(dir)); + final Listing.FileStatusAcceptor acceptor = + new Listing.AcceptAllButSelfAndS3nDirs(dir); + boolean allowAuthoritative = allowAuthoritative(dir); + DirListingMetadata meta = + S3Guard.listChildrenWithTtl(metadataStore, dir, + ttlTimeProvider, allowAuthoritative); + Set tombstones = meta != null + ? meta.listTombstones() + : null; + final RemoteIterator cachedFileStatusIterator = + listing.createProvidedFileStatusIterator( + dirMetaToStatuses(meta), filter, acceptor); + return (allowAuthoritative && meta != null + && meta.isAuthoritative()) + ? listing.createLocatedFileStatusIterator( + cachedFileStatusIterator) + : listing.createTombstoneReconcilingIterator( + listing.createLocatedFileStatusIterator( + listing.createFileStatusListingIterator(dir, + createListObjectsRequest(key, "/"), + filter, + acceptor, + cachedFileStatusIterator)), + tombstones); + } + /** * Build a {@link S3ALocatedFileStatus} from a {@link FileStatus} instance. 
* @param status file status @@ -4382,7 +4671,7 @@ public List listMultipartUploads(String prefix) */ @Retries.OnceRaw void abortMultipartUpload(String destKey, String uploadId) { - LOG.debug("Aborting multipart upload {} to {}", uploadId, destKey); + LOG.info("Aborting multipart upload {} to {}", uploadId, destKey); getAmazonS3Client().abortMultipartUpload( new AbortMultipartUploadRequest(getBucket(), destKey, @@ -4416,8 +4705,8 @@ void abortMultipartUpload(MultipartUpload upload) { * Create a new instance of the committer statistics. * @return a new committer statistics instance */ - public S3AInstrumentation.CommitterStatistics newCommitterStatistics() { - return instrumentation.newCommitterStatistics(); + public CommitterStatistics newCommitterStatistics() { + return statisticsContext.newCommitterStatistics(); } @SuppressWarnings("deprecation") @@ -4425,7 +4714,8 @@ public S3AInstrumentation.CommitterStatistics newCommitterStatistics() { public boolean hasPathCapability(final Path path, final String capability) throws IOException { final Path p = makeQualified(path); - switch (validatePathCapabilityArgs(p, capability)) { + String cap = validatePathCapabilityArgs(p, capability); + switch (cap) { case CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER: case CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER_OLD: @@ -4441,8 +4731,28 @@ public boolean hasPathCapability(final Path path, final String capability) return getConf().getBoolean(ETAG_CHECKSUM_ENABLED, ETAG_CHECKSUM_ENABLED_DEFAULT); + case CommonPathCapabilities.ABORTABLE_STREAM: + case CommonPathCapabilities.FS_MULTIPART_UPLOADER: + return true; + + // this client is safe to use with buckets + // containing directory markers anywhere in + // the hierarchy + case STORE_CAPABILITY_DIRECTORY_MARKER_AWARE: + return true; + + /* + * Marker policy capabilities are handed off. + */ + case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP: + case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE: + case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE: + case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP: + case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE: + return getDirectoryMarkerPolicy().hasPathCapability(path, cap); + default: - return super.hasPathCapability(p, capability); + return super.hasPathCapability(p, cap); } } @@ -4457,7 +4767,7 @@ public boolean hasPathCapability(final Path path, final String capability) @Override public boolean hasCapability(String capability) { try { - return hasPathCapability(workingDir, capability); + return hasPathCapability(new Path("/"), capability); } catch (IOException ex) { // should never happen, so log and downgrade. 
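The hasPathCapability changes above let a client ask, per path, whether this S3A instance is directory-marker aware and which marker policy applies, without issuing any S3 requests. A sketch of such a probe; the capability strings are assumptions derived from the constant names in the hunk, so an unrecognised probe should simply be treated as unsupported:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CapabilityProbe {
  public static void main(String[] args) throws Exception {
    // hypothetical path; the marker policy can differ per path
    Path path = new Path("s3a://example-bucket/tables/");
    try (FileSystem fs = FileSystem.newInstance(path.toUri(), new Configuration())) {
      // capability names assumed from the constants referenced above
      String[] probes = {
          "fs.s3a.capability.directory.marker.aware",
          "fs.s3a.capability.directory.marker.policy.keep",
          "fs.s3a.capability.directory.marker.policy.delete"
      };
      for (String capability : probes) {
        System.out.println(capability + " = "
            + fs.hasPathCapability(path, capability));
      }
    }
  }
}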
LOG.debug("Ignoring exception on hasCapability({}})", capability, ex); @@ -4531,8 +4841,9 @@ private FSDataInputStream select(final Path source, ChangeTracker changeTracker = new ChangeTracker(uri.toString(), changeDetectionPolicy, - readContext.instrumentation.newInputStreamStatistics() - .getVersionMismatchCounter(), + readContext.getS3AStatisticsContext() + .newInputStreamStatistics() + .getChangeTrackerStatistics(), objectAttributes); // will retry internally if wrong version detected @@ -4670,6 +4981,18 @@ public CompletableFuture openFileWithOptions( return result; } + @Override + public S3AMultipartUploaderBuilder createMultipartUploader( + final Path basePath) + throws IOException { + StoreContext ctx = createStoreContext(); + return new S3AMultipartUploaderBuilder(this, + getWriteOperationHelper(), + ctx, + basePath, + statisticsContext.createMultipartUploaderStatistics()); + } + /** * Build an immutable store context. * If called while the FS is being initialized, @@ -4679,24 +5002,42 @@ public CompletableFuture openFileWithOptions( */ @InterfaceAudience.Private public StoreContext createStoreContext() { - return new StoreContext( - getUri(), - getBucket(), - getConf(), - getUsername(), - owner, - boundedThreadPool, - executorCapacity, - invoker, - getInstrumentation(), - getStorageStatistics(), - getInputPolicy(), - changeDetectionPolicy, - enableMultiObjectsDelete, - metadataStore, - useListV1, - new ContextAccessorsImpl(), - getTtlTimeProvider()); + return new StoreContextBuilder().setFsURI(getUri()) + .setBucket(getBucket()) + .setConfiguration(getConf()) + .setUsername(getUsername()) + .setOwner(owner) + .setExecutor(boundedThreadPool) + .setExecutorCapacity(executorCapacity) + .setInvoker(invoker) + .setInstrumentation(statisticsContext) + .setStorageStatistics(getStorageStatistics()) + .setInputPolicy(getInputPolicy()) + .setChangeDetectionPolicy(changeDetectionPolicy) + .setMultiObjectDeleteEnabled(enableMultiObjectsDelete) + .setMetadataStore(metadataStore) + .setUseListV1(useListV1) + .setContextAccessors(new ContextAccessorsImpl()) + .setTimeProvider(getTtlTimeProvider()) + .build(); + } + + /** + * Create a marker tools operations binding for this store. + * @return callbacks for operations. + */ + @InterfaceAudience.Private + public MarkerToolOperations createMarkerToolOperations() { + return new MarkerToolOperationsImpl(operationCallbacks); + } + + /** + * This is purely for testing, as it force initializes all static + * initializers. See HADOOP-17385 for details. 
+ */ + @InterfaceAudience.Private + public static void initializeClass() { + LOG.debug("Initialize S3A class"); } /** @@ -4724,5 +5065,17 @@ public File createTempFile(final String prefix, final long size) public String getBucketLocation() throws IOException { return S3AFileSystem.this.getBucketLocation(); } + + @Override + public Path makeQualified(final Path path) { + return S3AFileSystem.this.makeQualified(path); + } + + @Override + public ObjectMetadata getObjectMetadata(final String key) + throws IOException { + return once("getObjectMetadata", key, () -> + S3AFileSystem.this.getObjectMetadata(key)); + } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java index 9c8b9ae7a156e..c725fdf37ff8a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java @@ -25,18 +25,22 @@ import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.amazonaws.services.s3.model.SSECustomerKey; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CanSetReadahead; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; +import org.apache.hadoop.fs.s3a.impl.ChangeTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.StreamCapabilities; -import org.apache.hadoop.fs.s3a.impl.ChangeTracker; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.statistics.DurationTracker; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,7 +72,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class S3AInputStream extends FSInputStream implements CanSetReadahead, - CanUnbuffer, StreamCapabilities { + CanUnbuffer, StreamCapabilities, IOStatisticsSource { public static final String E_NEGATIVE_READAHEAD_VALUE = "Negative readahead value"; @@ -87,6 +91,14 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, * set */ private volatile boolean closed; + /** + * wrappedStream is associated with an object (instance of S3Object). When + * the object is garbage collected, the associated wrappedStream will be + * closed. Keep a reference to this object to prevent the wrapperStream + * still in use from being closed unexpectedly due to garbage collection. + * See HADOOP-17338 for details. 
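The object field documented above exists because, per HADOOP-17338, the wrapped SDK stream can be closed when its owning S3Object becomes unreachable and is garbage collected while a read is still in progress. A generic sketch of that reference-pinning pattern, with made-up classes standing in for the SDK types:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

/** Illustrative only: a reader that pins the owner of its inner stream. */
class OwnedStreamReader implements AutoCloseable {

  /** Stand-in for a response object whose cleanup closes its stream. */
  static class ResponseObject {
    private final InputStream content =
        new ByteArrayInputStream("payload".getBytes(StandardCharsets.UTF_8));
    InputStream getContent() {
      return content;
    }
  }

  // hold the owner as well as the stream, so the owner cannot be
  // garbage collected (and its stream closed) while reads continue
  private ResponseObject owner;
  private InputStream in;

  OwnedStreamReader(ResponseObject owner) {
    this.owner = owner;
    this.in = owner.getContent();
  }

  int read() throws IOException {
    return in.read();
  }

  @Override
  public void close() throws IOException {
    try {
      in.close();
    } finally {
      // drop both references together once the caller is done
      in = null;
      owner = null;
    }
  }
}

Dropping both references in close() keeps the pair eligible for collection once the caller is finished.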
+ */ + private S3Object object; private S3ObjectInputStream wrappedStream; private final S3AReadOpContext context; private final AmazonS3 client; @@ -97,7 +109,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, private final String uri; private static final Logger LOG = LoggerFactory.getLogger(S3AInputStream.class); - private final S3AInstrumentation.InputStreamStatistics streamStatistics; + private final S3AInputStreamStatistics streamStatistics; private S3AEncryptionMethods serverSideEncryptionAlgorithm; private String serverSideEncryptionKey; private S3AInputPolicy inputPolicy; @@ -123,6 +135,11 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead, /** change tracker. */ private final ChangeTracker changeTracker; + /** + * IOStatistics report. + */ + private final IOStatistics ioStatistics; + /** * Create the stream. * This does not attempt to open it; that is only done on the first @@ -146,13 +163,15 @@ public S3AInputStream(S3AReadOpContext ctx, this.contentLength = l; this.client = client; this.uri = "s3a://" + this.bucket + "/" + this.key; - this.streamStatistics = ctx.instrumentation.newInputStreamStatistics(); + this.streamStatistics = ctx.getS3AStatisticsContext() + .newInputStreamStatistics(); + this.ioStatistics = streamStatistics.getIOStatistics(); this.serverSideEncryptionAlgorithm = s3Attributes.getServerSideEncryptionAlgorithm(); this.serverSideEncryptionKey = s3Attributes.getServerSideEncryptionKey(); this.changeTracker = new ChangeTracker(uri, ctx.getChangeDetectionPolicy(), - streamStatistics.getVersionMismatchCounter(), + streamStatistics.getChangeTrackerStatistics(), s3Attributes); setInputPolicy(ctx.getInputPolicy()); setReadahead(ctx.getReadahead()); @@ -202,8 +221,21 @@ private synchronized void reopen(String reason, long targetPos, long length, String text = String.format("%s %s at %d", operation, uri, targetPos); changeTracker.maybeApplyConstraint(request); - S3Object object = Invoker.once(text, uri, - () -> client.getObject(request)); + DurationTracker tracker = streamStatistics.initiateGetRequest(); + try { + object = Invoker.once(text, uri, + () -> client.getObject(request)); + } catch(IOException e) { + // input function failed: note it + tracker.failed(); + // and rethrow + throw e; + } finally { + // update the tracker. + // this is called after any catch() call will have + // set the failed flag. + tracker.close(); + } changeTracker.processResponse(object, operation, targetPos); @@ -286,13 +318,11 @@ private void seekInStream(long targetPos, long length) throws IOException { if (skipForward) { // the forward seek range is within the limits LOG.debug("Forward seek on {}, of {} bytes", uri, diff); - streamStatistics.seekForwards(diff); long skipped = wrappedStream.skip(diff); if (skipped > 0) { pos += skipped; - // as these bytes have been read, they are included in the counter - incrementBytesRead(diff); } + streamStatistics.seekForwards(diff, skipped); if (pos == targetPos) { // all is well @@ -304,6 +334,9 @@ private void seekInStream(long targetPos, long length) throws IOException { LOG.warn("Failed to seek on {} to {}. Current position {}", uri, targetPos, pos); } + } else { + // not attempting to read any bytes from the stream + streamStatistics.seekForwards(diff, 0); } } else if (diff < 0) { // backwards seek @@ -348,7 +381,7 @@ private void lazySeek(long targetPos, long len) throws IOException { // open. 
After that, an exception generally means the file has changed // and there is no point retrying anymore. Invoker invoker = context.getReadInvoker(); - invoker.maybeRetry(streamStatistics.openOperations == 0, + invoker.maybeRetry(streamStatistics.getOpenOperations() == 0, "lazySeek", pathStr, true, () -> { //For lazy seek @@ -430,9 +463,15 @@ public synchronized int read() throws IOException { @Retries.OnceTranslated private void onReadFailure(IOException ioe, int length, boolean forceAbort) throws IOException { - - LOG.info("Got exception while trying to read from stream {}" + - " trying to recover: " + ioe, uri); + if (LOG.isDebugEnabled()) { + LOG.debug("Got exception while trying to read from stream {}, " + + "client: {} object: {}, trying to recover: ", + uri, client, object, ioe); + } else { + LOG.info("Got exception while trying to read from stream {}, " + + "client: {} object: {}, trying to recover: " + ioe, + uri, client, object); + } streamStatistics.readException(); reopen("failure recovery", pos, length, forceAbort); } @@ -550,14 +589,19 @@ public synchronized void close() throws IOException { */ @Retries.OnceRaw private void closeStream(String reason, long length, boolean forceAbort) { - if (isObjectStreamOpen()) { + if (!isObjectStreamOpen()) { + // steam is already closed + return; + } - // if the amount of data remaining in the current request is greater - // than the readahead value: abort. - long remaining = remainingInCurrentRequest(); - LOG.debug("Closing stream {}: {}", reason, - forceAbort ? "abort" : "soft"); - boolean shouldAbort = forceAbort || remaining > readahead; + // if the amount of data remaining in the current request is greater + // than the readahead value: abort. + long remaining = remainingInCurrentRequest(); + LOG.debug("Closing stream {}: {}", reason, + forceAbort ? "abort" : "soft"); + boolean shouldAbort = forceAbort || remaining > readahead; + + try { if (!shouldAbort) { try { // clean close. This will read to the end of the stream, @@ -578,25 +622,33 @@ private void closeStream(String reason, long length, boolean forceAbort) { streamStatistics.streamClose(false, drained); } catch (Exception e) { // exception escalates to an abort - LOG.debug("When closing {} stream for {}", uri, reason, e); + LOG.debug("When closing {} stream for {}, will abort the stream", + uri, reason, e); shouldAbort = true; } } if (shouldAbort) { // Abort, rather than just close, the underlying stream. Otherwise, the // remaining object payload is read from S3 while closing the stream. - LOG.debug("Aborting stream"); - wrappedStream.abort(); + LOG.debug("Aborting stream {}", uri); + try { + wrappedStream.abort(); + } catch (Exception e) { + LOG.warn("When aborting {} stream after failing to close it for {}", + uri, reason, e); + } streamStatistics.streamClose(true, remaining); } LOG.debug("Stream {} {}: {}; remaining={} streamPos={}," + " nextReadPos={}," + - " request range {}-{} length={}", + " request range {}-{} length={}", uri, (shouldAbort ? 
"aborted" : "closed"), reason, remaining, pos, nextReadPos, contentRangeStart, contentRangeFinish, length); + } finally { wrappedStream = null; + object = null; } } @@ -693,7 +745,7 @@ public String toString() { sb.append(" contentRangeFinish=").append(contentRangeFinish); sb.append(" remainingInCurrentRequest=") .append(remainingInCurrentRequest()); - sb.append(changeTracker); + sb.append(" ").append(changeTracker); sb.append('\n').append(s); sb.append('}'); return sb.toString(); @@ -747,7 +799,7 @@ public void readFully(long position, byte[] buffer, int offset, int length) */ @InterfaceAudience.Private @InterfaceStability.Unstable - public S3AInstrumentation.InputStreamStatistics getS3AStreamStatistics() { + public S3AInputStreamStatistics getS3AStreamStatistics() { return streamStatistics; } @@ -831,13 +883,14 @@ public synchronized void unbuffer() { try { closeStream("unbuffer()", contentRangeFinish, false); } finally { - streamStatistics.merge(false); + streamStatistics.unbuffered(); } } @Override public boolean hasCapability(String capability) { switch (toLowerCase(capability)) { + case StreamCapabilities.IOSTATISTICS: case StreamCapabilities.READAHEAD: case StreamCapabilities.UNBUFFER: return true; @@ -850,4 +903,9 @@ public boolean hasCapability(String capability) { boolean isObjectStreamOpen() { return wrappedStream != null; } + + @Override + public IOStatistics getIOStatistics() { + return ioStatistics; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java index b9918b5098946..dd28f3e59e037 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java @@ -18,14 +18,34 @@ package org.apache.hadoop.fs.s3a; -import com.google.common.annotations.VisibleForTesting; +import javax.annotation.Nullable; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FileSystem.Statistics; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.s3a.s3guard.MetastoreInstrumentation; +import org.apache.hadoop.fs.s3a.statistics.impl.AbstractS3AStatisticsSource; +import org.apache.hadoop.fs.s3a.statistics.ChangeTrackerStatistics; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.s3a.statistics.CountersAndGauges; +import org.apache.hadoop.fs.s3a.statistics.impl.CountingChangeTracker; +import org.apache.hadoop.fs.s3a.statistics.DelegationTokenStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.IOStatisticsLogging; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; 
+import org.apache.hadoop.fs.statistics.impl.IOStatisticsStoreBuilder; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricStringBuilder; import org.apache.hadoop.metrics2.MetricsCollector; @@ -43,208 +63,164 @@ import java.io.Closeable; import java.net.URI; +import java.time.Duration; +import java.util.EnumSet; import java.util.HashMap; import java.util.Map; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.fs.s3a.Constants.STREAM_READ_GAUGE_INPUT_POLICY; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_EXECUTOR_ACQUIRED; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_HTTP_GET_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_FAILURES; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_UNBUFFERED; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; import static org.apache.hadoop.fs.s3a.Statistic.*; /** - * Instrumentation of S3a. - * Derived from the {@code AzureFileSystemInstrumentation}. - * + * Instrumentation of S3A. + *
+ * <p>
+ * History
+ * <ol>
+ *   <li>
+ *   HADOOP-13028. Initial implementation.
+ *   Derived from the {@code AzureFileSystemInstrumentation}.
+ *   </li>
+ *   <li>
+ *   Broadly (and directly) used in S3A.
+ *   The use of direct references causes "problems" in mocking tests.
+ *   </li>
+ *   <li>
+ *   HADOOP-16830. IOStatistics. Move to an interface and implementation
+ *   design for the different inner classes.
+ *   </li>
+ * </ol>
+ * <p>
 * Counters and metrics are generally addressed in code by their name or
 * {@link Statistic} key. There may be some Statistics which do
 * not have an entry here. To avoid attempts to access such counters failing,
 * the operations to increment/query metric values are designed to handle
 * lookup failures.
+ * <p>
+ * S3AFileSystem StorageStatistics are dynamically derived from
+ * the IOStatistics.
+ * <p>
+ * The toString() operation includes the entire IOStatistics when this
+ * class's log is set to DEBUG. This keeps the logs somewhat manageable
+ * on normal runs, but allows for more reporting.
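+ * <p>
+ * As an illustrative sketch only ({@code in} and {@code LOG} are example
+ * names, not fields of this class), a stream which implements
+ * {@code IOStatisticsSource} can be interrogated through the
+ * hadoop-common statistics APIs:
+ * <pre>{@code
+ * IOStatistics stats = IOStatisticsSupport.retrieveIOStatistics(in);
+ * if (stats != null) {
+ *   Long bytesRead = stats.counters()
+ *       .get(StreamStatisticNames.STREAM_READ_BYTES);
+ *   LOG.info("bytes read: {}; statistics: {}", bytesRead,
+ *       IOStatisticsLogging.ioStatisticsToString(stats));
+ * }
+ * }</pre>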
    */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class S3AInstrumentation implements Closeable, MetricsSource { +public class S3AInstrumentation implements Closeable, MetricsSource, + CountersAndGauges, IOStatisticsSource { private static final Logger LOG = LoggerFactory.getLogger( S3AInstrumentation.class); private static final String METRICS_SOURCE_BASENAME = "S3AMetrics"; /** - * {@value #METRICS_SYSTEM_NAME} The name of the s3a-specific metrics + * {@value} The name of the s3a-specific metrics * system instance used for s3a metrics. */ public static final String METRICS_SYSTEM_NAME = "s3a-file-system"; /** - * {@value #CONTEXT} Currently all s3a metrics are placed in a single + * {@value} Currently all s3a metrics are placed in a single * "context". Distinct contexts may be used in the future. */ public static final String CONTEXT = "s3aFileSystem"; /** - * {@value #METRIC_TAG_FILESYSTEM_ID} The name of a field added to metrics + * {@value} The name of a field added to metrics * records that uniquely identifies a specific FileSystem instance. */ public static final String METRIC_TAG_FILESYSTEM_ID = "s3aFileSystemId"; /** - * {@value #METRIC_TAG_BUCKET} The name of a field added to metrics records + * {@value} The name of a field added to metrics records * that indicates the hostname portion of the FS URL. */ public static final String METRIC_TAG_BUCKET = "bucket"; // metricsSystemLock must be used to synchronize modifications to // metricsSystem and the following counters. - private static Object metricsSystemLock = new Object(); + private static final Object METRICS_SYSTEM_LOCK = new Object(); private static MetricsSystem metricsSystem = null; private static int metricsSourceNameCounter = 0; private static int metricsSourceActiveCounter = 0; + private final DurationTrackerFactory durationTrackerFactory; + private String metricsSourceName; private final MetricsRegistry registry = new MetricsRegistry("s3aFileSystem").setContext(CONTEXT); - private final MutableCounterLong streamOpenOperations; - private final MutableCounterLong streamCloseOperations; - private final MutableCounterLong streamClosed; - private final MutableCounterLong streamAborted; - private final MutableCounterLong streamSeekOperations; - private final MutableCounterLong streamReadExceptions; - private final MutableCounterLong streamForwardSeekOperations; - private final MutableCounterLong streamBackwardSeekOperations; - private final MutableCounterLong streamBytesSkippedOnSeek; - private final MutableCounterLong streamBytesBackwardsOnSeek; - private final MutableCounterLong streamBytesRead; - private final MutableCounterLong streamReadOperations; - private final MutableCounterLong streamReadFullyOperations; - private final MutableCounterLong streamReadsIncomplete; - private final MutableCounterLong streamBytesReadInClose; - private final MutableCounterLong streamBytesDiscardedInAbort; - private final MutableCounterLong ignoredErrors; private final MutableQuantiles putLatencyQuantile; private final MutableQuantiles throttleRateQuantile; private final MutableQuantiles s3GuardThrottleRateQuantile; - private final MutableCounterLong numberOfFilesCreated; - private final MutableCounterLong numberOfFilesCopied; - private final MutableCounterLong bytesOfFilesCopied; - private final MutableCounterLong numberOfFilesDeleted; - private final MutableCounterLong numberOfFakeDirectoryDeletes; - private final MutableCounterLong numberOfDirectoriesCreated; - private final MutableCounterLong 
numberOfDirectoriesDeleted; /** Instantiate this without caring whether or not S3Guard is enabled. */ private final S3GuardInstrumentation s3GuardInstrumentation = new S3GuardInstrumentation(); - private static final Statistic[] COUNTERS_TO_CREATE = { - INVOCATION_COPY_FROM_LOCAL_FILE, - INVOCATION_CREATE, - INVOCATION_CREATE_NON_RECURSIVE, - INVOCATION_DELETE, - INVOCATION_EXISTS, - INVOCATION_GET_DELEGATION_TOKEN, - INVOCATION_GET_FILE_CHECKSUM, - INVOCATION_GET_FILE_STATUS, - INVOCATION_GLOB_STATUS, - INVOCATION_IS_DIRECTORY, - INVOCATION_IS_FILE, - INVOCATION_LIST_FILES, - INVOCATION_LIST_LOCATED_STATUS, - INVOCATION_LIST_STATUS, - INVOCATION_MKDIRS, - INVOCATION_OPEN, - INVOCATION_RENAME, - OBJECT_COPY_REQUESTS, - OBJECT_DELETE_REQUESTS, - OBJECT_LIST_REQUESTS, - OBJECT_CONTINUE_LIST_REQUESTS, - OBJECT_METADATA_REQUESTS, - OBJECT_MULTIPART_UPLOAD_ABORTED, - OBJECT_PUT_BYTES, - OBJECT_PUT_REQUESTS, - OBJECT_PUT_REQUESTS_COMPLETED, - OBJECT_SELECT_REQUESTS, - STREAM_READ_VERSION_MISMATCHES, - STREAM_WRITE_FAILURES, - STREAM_WRITE_BLOCK_UPLOADS, - STREAM_WRITE_BLOCK_UPLOADS_COMMITTED, - STREAM_WRITE_BLOCK_UPLOADS_ABORTED, - STREAM_WRITE_TOTAL_TIME, - STREAM_WRITE_TOTAL_DATA, - COMMITTER_COMMITS_CREATED, - COMMITTER_COMMITS_COMPLETED, - COMMITTER_JOBS_SUCCEEDED, - COMMITTER_JOBS_FAILED, - COMMITTER_TASKS_SUCCEEDED, - COMMITTER_TASKS_FAILED, - COMMITTER_BYTES_COMMITTED, - COMMITTER_BYTES_UPLOADED, - COMMITTER_COMMITS_FAILED, - COMMITTER_COMMITS_ABORTED, - COMMITTER_COMMITS_REVERTED, - COMMITTER_MAGIC_FILES_CREATED, - S3GUARD_METADATASTORE_PUT_PATH_REQUEST, - S3GUARD_METADATASTORE_INITIALIZATION, - S3GUARD_METADATASTORE_RECORD_DELETES, - S3GUARD_METADATASTORE_RECORD_READS, - S3GUARD_METADATASTORE_RECORD_WRITES, - S3GUARD_METADATASTORE_RETRY, - S3GUARD_METADATASTORE_THROTTLED, - S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED, - STORE_IO_THROTTLED, - DELEGATION_TOKENS_ISSUED, - FILES_DELETE_REJECTED - }; - - private static final Statistic[] GAUGES_TO_CREATE = { - OBJECT_PUT_REQUESTS_ACTIVE, - OBJECT_PUT_BYTES_PENDING, - STREAM_WRITE_BLOCK_UPLOADS_ACTIVE, - STREAM_WRITE_BLOCK_UPLOADS_PENDING, - STREAM_WRITE_BLOCK_UPLOADS_DATA_PENDING, - }; + /** + * This is the IOStatistics store for the S3AFileSystem + * instance. + * It is not kept in sync with the rest of the S3A instrumentation. + * Most inner statistics implementation classes only update this + * store when it is pushed back, such as as in close(). + */ + private final IOStatisticsStore instanceIOStatistics; + /** + * Construct the instrumentation for a filesystem. + * @param name URI of filesystem. 
+ */ public S3AInstrumentation(URI name) { UUID fileSystemInstanceId = UUID.randomUUID(); registry.tag(METRIC_TAG_FILESYSTEM_ID, "A unique identifier for the instance", fileSystemInstanceId.toString()); registry.tag(METRIC_TAG_BUCKET, "Hostname from the FS URL", name.getHost()); - streamOpenOperations = counter(STREAM_OPENED); - streamCloseOperations = counter(STREAM_CLOSE_OPERATIONS); - streamClosed = counter(STREAM_CLOSED); - streamAborted = counter(STREAM_ABORTED); - streamSeekOperations = counter(STREAM_SEEK_OPERATIONS); - streamReadExceptions = counter(STREAM_READ_EXCEPTIONS); - streamForwardSeekOperations = - counter(STREAM_FORWARD_SEEK_OPERATIONS); - streamBackwardSeekOperations = - counter(STREAM_BACKWARD_SEEK_OPERATIONS); - streamBytesSkippedOnSeek = counter(STREAM_SEEK_BYTES_SKIPPED); - streamBytesBackwardsOnSeek = - counter(STREAM_SEEK_BYTES_BACKWARDS); - streamBytesRead = counter(STREAM_SEEK_BYTES_READ); - streamReadOperations = counter(STREAM_READ_OPERATIONS); - streamReadFullyOperations = - counter(STREAM_READ_FULLY_OPERATIONS); - streamReadsIncomplete = - counter(STREAM_READ_OPERATIONS_INCOMPLETE); - streamBytesReadInClose = counter(STREAM_CLOSE_BYTES_READ); - streamBytesDiscardedInAbort = counter(STREAM_ABORT_BYTES_DISCARDED); - numberOfFilesCreated = counter(FILES_CREATED); - numberOfFilesCopied = counter(FILES_COPIED); - bytesOfFilesCopied = counter(FILES_COPIED_BYTES); - numberOfFilesDeleted = counter(FILES_DELETED); - numberOfFakeDirectoryDeletes = counter(FAKE_DIRECTORIES_DELETED); - numberOfDirectoriesCreated = counter(DIRECTORIES_CREATED); - numberOfDirectoriesDeleted = counter(DIRECTORIES_DELETED); - ignoredErrors = counter(IGNORED_ERRORS); - for (Statistic statistic : COUNTERS_TO_CREATE) { - counter(statistic); - } - for (Statistic statistic : GAUGES_TO_CREATE) { - gauge(statistic.getSymbol(), statistic.getDescription()); - } + + // now set up the instance IOStatistics. + // create the builder + IOStatisticsStoreBuilder storeBuilder = iostatisticsStore(); + + // declare all counter statistics + EnumSet.allOf(Statistic.class).stream() + .filter(statistic -> + statistic.getType() == StatisticTypeEnum.TYPE_COUNTER) + .forEach(stat -> { + counter(stat); + storeBuilder.withCounters(stat.getSymbol()); + }); + // declare all gauge statistics + EnumSet.allOf(Statistic.class).stream() + .filter(statistic -> + statistic.getType() == StatisticTypeEnum.TYPE_GAUGE) + .forEach(stat -> { + gauge(stat); + storeBuilder.withGauges(stat.getSymbol()); + }); + + // and durations + EnumSet.allOf(Statistic.class).stream() + .filter(statistic -> + statistic.getType() == StatisticTypeEnum.TYPE_DURATION) + .forEach(stat -> { + duration(stat); + storeBuilder.withDurationTracking(stat.getSymbol()); + }); + //todo need a config for the quantiles interval? int interval = 1; putLatencyQuantile = quantiles(S3GUARD_METADATASTORE_PUT_PATH_LATENCY, @@ -254,12 +230,21 @@ public S3AInstrumentation(URI name) { throttleRateQuantile = quantiles(STORE_IO_THROTTLE_RATE, "events", "frequency (Hz)", interval); + // register with Hadoop metrics registerAsMetricsSource(name); + + // and build the IO Statistics + instanceIOStatistics = storeBuilder.build(); + + // duration track metrics (Success/failure) and IOStatistics. 
+ durationTrackerFactory = IOStatisticsBinding.pairedTrackerFactory( + instanceIOStatistics, + new MetricDurationTrackerFactory()); } @VisibleForTesting public MetricsSystem getMetricsSystem() { - synchronized (metricsSystemLock) { + synchronized (METRICS_SYSTEM_LOCK) { if (metricsSystem == null) { metricsSystem = new MetricsSystemImpl(); metricsSystem.init(METRICS_SYSTEM_NAME); @@ -274,7 +259,7 @@ public MetricsSystem getMetricsSystem() { */ private void registerAsMetricsSource(URI name) { int number; - synchronized(metricsSystemLock) { + synchronized(METRICS_SYSTEM_LOCK) { getMetricsSystem(); metricsSourceActiveCounter++; @@ -304,6 +289,15 @@ protected final MutableCounterLong counter(Statistic op) { return counter(op.getSymbol(), op.getDescription()); } + /** + * Registering a duration adds the success and failure counters. + * @param op statistic to track + */ + protected final void duration(Statistic op) { + counter(op.getSymbol(), op.getDescription()); + counter(op.getSymbol() + SUFFIX_FAILURES, op.getDescription()); + } + /** * Create a gauge in the registry. * @param name name gauge name @@ -314,6 +308,15 @@ protected final MutableGaugeLong gauge(String name, String desc) { return registry.newGauge(name, desc, 0L); } + /** + * Create a gauge in the registry. + * @param op statistic to count + * @return the gauge + */ + protected final MutableGaugeLong gauge(Statistic op) { + return gauge(op.getSymbol(), op.getDescription()); + } + /** * Create a quantiles in the registry. * @param op statistic to collect @@ -390,7 +393,8 @@ private MutableCounterLong lookupCounter(String name) { } if (!(metric instanceof MutableCounterLong)) { throw new IllegalStateException("Metric " + name - + " is not a MutableCounterLong: " + metric); + + " is not a MutableCounterLong: " + metric + + " (type: " + metric.getClass() +")"); } return (MutableCounterLong) metric; } @@ -434,11 +438,57 @@ public MutableMetric lookupMetric(String name) { return metric; } + /** + * Get the instance IO Statistics. + * @return statistics. + */ + @Override + public IOStatisticsStore getIOStatistics() { + return instanceIOStatistics; + } + + /** + * Get the duration tracker factory. + * @return duration tracking for the instrumentation. + */ + public DurationTrackerFactory getDurationTrackerFactory() { + return durationTrackerFactory; + } + + /** + * The duration tracker updates the metrics with the count + * and IOStatistics will full duration information. + * @param key statistic key prefix + * @param count #of times to increment the matching counter in this + * operation. + * @return a duration tracker. + */ + @Override + public DurationTracker trackDuration(final String key, final long count) { + return durationTrackerFactory.trackDuration(key, count); + } + + /** + * String representation. Includes the IOStatistics + * when logging is at DEBUG. + * @return a string form. + */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "S3AInstrumentation{"); + if (LOG.isDebugEnabled()) { + sb.append("instanceIOStatistics=").append(instanceIOStatistics); + } + sb.append('}'); + return sb.toString(); + } + /** * Indicate that S3A created a file. */ public void fileCreated() { - numberOfFilesCreated.incr(); + incrementCounter(FILES_CREATED, 1); } /** @@ -446,7 +496,7 @@ public void fileCreated() { * @param count number of files. 
*/ public void fileDeleted(int count) { - numberOfFilesDeleted.incr(count); + incrementCounter(FILES_DELETED, count); } /** @@ -454,21 +504,21 @@ public void fileDeleted(int count) { * @param count number of directory entries included in the delete request. */ public void fakeDirsDeleted(int count) { - numberOfFakeDirectoryDeletes.incr(count); + incrementCounter(FAKE_DIRECTORIES_DELETED, count); } /** * Indicate that S3A created a directory. */ public void directoryCreated() { - numberOfDirectoriesCreated.incr(); + incrementCounter(DIRECTORIES_CREATED, 1); } /** * Indicate that S3A just deleted a directory. */ public void directoryDeleted() { - numberOfDirectoriesDeleted.incr(); + incrementCounter(DIRECTORIES_DELETED, 1); } /** @@ -478,27 +528,44 @@ public void directoryDeleted() { * @param size total size in bytes */ public void filesCopied(int files, long size) { - numberOfFilesCopied.incr(files); - bytesOfFilesCopied.incr(size); + incrementCounter(FILES_COPIED, files); + incrementCounter(FILES_COPIED_BYTES, size); } /** * Note that an error was ignored. */ public void errorIgnored() { - ignoredErrors.incr(); + incrementCounter(IGNORED_ERRORS, 1); } /** - * Increment a specific counter. - * No-op if not defined. + * Increments a mutable counter and the matching + * instance IOStatistics counter. + * No-op if the counter is not defined, or the count == 0. * @param op operation * @param count increment value */ public void incrementCounter(Statistic op, long count) { - MutableCounterLong counter = lookupCounter(op.getSymbol()); - if (counter != null) { - counter.incr(count); + String name = op.getSymbol(); + if (count != 0) { + incrementMutableCounter(name, count); + instanceIOStatistics.incrementCounter(name, count); + } + } + + /** + * Increments a Mutable counter. + * No-op if not a positive integer. + * @param name counter name. + * @param count increment value + */ + private void incrementMutableCounter(final String name, final long count) { + if (count > 0) { + MutableCounterLong counter = lookupCounter(name); + if (counter != null) { + counter.incr(count); + } } } @@ -517,8 +584,10 @@ public void addValueToQuantiles(Statistic op, long value) { } /** - * Increment a specific counter. - * No-op if not defined. + * Increments a mutable counter and the matching + * instance IOStatistics counter with the value of + * the atomic long. + * No-op if the counter is not defined, or the count == 0. * @param op operation * @param count atomic long containing value */ @@ -558,12 +627,30 @@ public void decrementGauge(Statistic op, long count) { } } + /** + * Add the duration as a timed statistic, deriving + * statistic name from the operation symbol and the outcome. + * @param op operation + * @param success was the operation a success? + * @param duration how long did it take + */ + @Override + public void recordDuration(final Statistic op, + final boolean success, + final Duration duration) { + String name = op.getSymbol() + + (success ? "" : SUFFIX_FAILURES); + instanceIOStatistics.addTimedOperation(name, duration); + } + /** * Create a stream input statistics instance. * @return the new instance + * @param filesystemStatistics FS Statistics to update in close(). 
*/ - public InputStreamStatistics newInputStreamStatistics() { - return new InputStreamStatistics(); + public S3AInputStreamStatistics newInputStreamStatistics( + @Nullable final FileSystem.Statistics filesystemStatistics) { + return new InputStreamStatistics(filesystemStatistics); } /** @@ -579,34 +666,8 @@ public MetastoreInstrumentation getS3GuardInstrumentation() { * Create a new instance of the committer statistics. * @return a new committer statistics instance */ - CommitterStatistics newCommitterStatistics() { - return new CommitterStatistics(); - } - - /** - * Merge in the statistics of a single input stream into - * the filesystem-wide statistics. - * @param statistics stream statistics - */ - private void mergeInputStreamStatistics(InputStreamStatistics statistics) { - streamOpenOperations.incr(statistics.openOperations); - streamCloseOperations.incr(statistics.closeOperations); - streamClosed.incr(statistics.closed); - streamAborted.incr(statistics.aborted); - streamSeekOperations.incr(statistics.seekOperations); - streamReadExceptions.incr(statistics.readExceptions); - streamForwardSeekOperations.incr(statistics.forwardSeekOperations); - streamBytesSkippedOnSeek.incr(statistics.bytesSkippedOnSeek); - streamBackwardSeekOperations.incr(statistics.backwardSeekOperations); - streamBytesBackwardsOnSeek.incr(statistics.bytesBackwardsOnSeek); - streamBytesRead.incr(statistics.bytesRead); - streamReadOperations.incr(statistics.readOperations); - streamReadFullyOperations.incr(statistics.readFullyOperations); - streamReadsIncomplete.incr(statistics.readsIncomplete); - streamBytesReadInClose.incr(statistics.bytesReadInClose); - streamBytesDiscardedInAbort.incr(statistics.bytesDiscardedInAbort); - incrementCounter(STREAM_READ_VERSION_MISMATCHES, - statistics.versionMismatches.get()); + public CommitterStatistics newCommitterStatistics() { + return new CommitterStatisticsImpl(); } @Override @@ -615,14 +676,15 @@ public void getMetrics(MetricsCollector collector, boolean all) { } public void close() { - synchronized (metricsSystemLock) { + synchronized (METRICS_SYSTEM_LOCK) { // it is critical to close each quantile, as they start a scheduled // task in a shared thread pool. putLatencyQuantile.stop(); throttleRateQuantile.stop(); s3GuardThrottleRateQuantile.stop(); metricsSystem.unregisterSource(metricsSourceName); - int activeSources = --metricsSourceActiveCounter; + metricsSourceActiveCounter--; + int activeSources = metricsSourceActiveCounter; if (activeSources == 0) { LOG.debug("Shutting down metrics publisher"); metricsSystem.publishMetricsNow(); @@ -633,164 +695,364 @@ public void close() { } /** - * Statistics updated by an input stream during its actual operation. - * These counters not thread-safe and are for use in a single instance - * of a stream. + * A duration tracker which updates a mutable counter with a metric. + * The metric is updated with the count on start; after a failure + * the failures count is incremented by one. + */ + private final class MetricUpdatingDurationTracker + implements DurationTracker { + + private final String symbol; + + private boolean failed; + + private MetricUpdatingDurationTracker( + final String symbol, + final long count) { + this.symbol = symbol; + incrementMutableCounter(symbol, count); + } + + @Override + public void failed() { + failed = true; + } + + /** + * Close: on failure increment any mutable counter of + * failures. 
+ */ + @Override + public void close() { + if (failed) { + incrementMutableCounter(symbol + SUFFIX_FAILURES, 1); + } + } + } + + /** + * Duration Tracker Factory for updating metrics. */ - @InterfaceAudience.Private - @InterfaceStability.Unstable - public final class InputStreamStatistics implements AutoCloseable { - public long openOperations; - public long closeOperations; - public long closed; - public long aborted; - public long seekOperations; - public long readExceptions; - public long forwardSeekOperations; - public long backwardSeekOperations; - public long bytesRead; - public long bytesSkippedOnSeek; - public long bytesBackwardsOnSeek; - public long readOperations; - public long readFullyOperations; - public long readsIncomplete; - public long bytesReadInClose; - public long bytesDiscardedInAbort; - public long policySetCount; - public long inputPolicy; - /** This is atomic so that it can be passed as a reference. */ - private final AtomicLong versionMismatches = new AtomicLong(0); - private InputStreamStatistics mergedStats; - - private InputStreamStatistics() { + private final class MetricDurationTrackerFactory + implements DurationTrackerFactory { + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return new MetricUpdatingDurationTracker(key, count); } + } + + /** + * Statistics updated by an S3AInputStream during its actual operation. + *
+ * <p>
+ * When {@code unbuffer()} is called, the changed numbers are propagated
+ * to the S3AFileSystem metrics.
+ * <p>
+ * When {@code close()} is called, the final set of numbers are propagated
+ * to the S3AFileSystem metrics.
+ * <p>
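+ * A sketch of the intended lifecycle, where {@code in} is an illustrative
+ * stream reference and {@code buffer} a byte array:
+ * <pre>{@code
+ * in.read(buffer);   // updates only this instance's counters
+ * in.unbuffer();     // deltas merged into the filesystem metrics
+ * in.close();        // final merge, including FileSystem.Statistics
+ * }</pre>
+ * <p>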
    + * The {@link FileSystem.Statistics} statistics passed in are also + * updated. This ensures that whichever thread calls close() gets the + * total count of bytes read, even if any work is done in other + * threads. + * + */ + private final class InputStreamStatistics + extends AbstractS3AStatisticsSource + implements S3AInputStreamStatistics { + /** - * Seek backwards, incrementing the seek and backward seek counters. - * @param negativeOffset how far was the seek? - * This is expected to be negative. + * Distance used when incrementing FS stats. */ + private static final int DISTANCE = 5; + + /** + * FS statistics for the thread creating the stream. + */ + private final FileSystem.Statistics filesystemStatistics; + + /** + * The statistics from the last merge. + */ + private IOStatisticsSnapshot mergedStats; + + /* + The core counters are extracted to atomic longs for slightly + faster resolution on the critical paths, especially single byte + reads and the like. + */ + private final AtomicLong aborted; + private final AtomicLong backwardSeekOperations; + private final AtomicLong bytesBackwardsOnSeek; + private final AtomicLong bytesDiscardedInAbort; + /** Bytes read by the application. */ + private final AtomicLong bytesRead; + private final AtomicLong bytesDiscardedInClose; + private final AtomicLong bytesDiscardedOnSeek; + private final AtomicLong bytesSkippedOnSeek; + private final AtomicLong closed; + private final AtomicLong forwardSeekOperations; + private final AtomicLong openOperations; + private final AtomicLong readExceptions; + private final AtomicLong readsIncomplete; + private final AtomicLong readOperations; + private final AtomicLong readFullyOperations; + private final AtomicLong seekOperations; + + /** Bytes read by the application and any when draining streams . */ + private final AtomicLong totalBytesRead; + + /** + * Instantiate. + * @param filesystemStatistics FS Statistics to update in close(). 
+ */ + private InputStreamStatistics( + @Nullable FileSystem.Statistics filesystemStatistics) { + this.filesystemStatistics = filesystemStatistics; + IOStatisticsStore st = iostatisticsStore() + .withCounters( + StreamStatisticNames.STREAM_READ_ABORTED, + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_ABORT, + StreamStatisticNames.STREAM_READ_CLOSED, + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_CLOSE, + StreamStatisticNames.STREAM_READ_CLOSE_OPERATIONS, + StreamStatisticNames.STREAM_READ_OPENED, + StreamStatisticNames.STREAM_READ_BYTES, + StreamStatisticNames.STREAM_READ_EXCEPTIONS, + StreamStatisticNames.STREAM_READ_FULLY_OPERATIONS, + StreamStatisticNames.STREAM_READ_OPERATIONS, + StreamStatisticNames.STREAM_READ_OPERATIONS_INCOMPLETE, + StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_POLICY_CHANGED, + StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_DISCARDED, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED, + StreamStatisticNames.STREAM_READ_TOTAL_BYTES, + StreamStatisticNames.STREAM_READ_UNBUFFERED, + StreamStatisticNames.STREAM_READ_VERSION_MISMATCHES) + .withGauges(STREAM_READ_GAUGE_INPUT_POLICY) + .withDurationTracking(ACTION_HTTP_GET_REQUEST) + .build(); + setIOStatistics(st); + aborted = st.getCounterReference( + StreamStatisticNames.STREAM_READ_ABORTED); + backwardSeekOperations = st.getCounterReference( + StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS); + bytesBackwardsOnSeek = st.getCounterReference( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS); + bytesDiscardedInAbort = st.getCounterReference( + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_ABORT); + bytesRead = st.getCounterReference( + StreamStatisticNames.STREAM_READ_BYTES); + bytesDiscardedInClose = st.getCounterReference( + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_CLOSE); + bytesDiscardedOnSeek = st.getCounterReference( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_DISCARDED); + bytesSkippedOnSeek = st.getCounterReference( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED); + closed = st.getCounterReference( + StreamStatisticNames.STREAM_READ_CLOSED); + forwardSeekOperations = st.getCounterReference( + StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS); + openOperations = st.getCounterReference( + StreamStatisticNames.STREAM_READ_OPENED); + readExceptions = st.getCounterReference( + StreamStatisticNames.STREAM_READ_EXCEPTIONS); + readsIncomplete = st.getCounterReference( + StreamStatisticNames.STREAM_READ_OPERATIONS_INCOMPLETE); + readOperations = st.getCounterReference( + StreamStatisticNames.STREAM_READ_OPERATIONS); + readFullyOperations = st.getCounterReference( + StreamStatisticNames.STREAM_READ_FULLY_OPERATIONS); + seekOperations = st.getCounterReference( + StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS); + totalBytesRead = st.getCounterReference( + StreamStatisticNames.STREAM_READ_TOTAL_BYTES); + setIOStatistics(st); + // create initial snapshot of merged statistics + mergedStats = snapshotIOStatistics(st); + } + + /** + * Increment a named counter by one. + * @param name counter name + * @return the new value + */ + private long increment(String name) { + return increment(name, 1); + } + + /** + * Increment a named counter by a given value. + * @param name counter name + * @param value value to increment by. 
+ * @return the new value + */ + private long increment(String name, long value) { + return incCounter(name, value); + } + + /** + * {@inheritDoc}. + * Increments the number of seek operations, + * and backward seek operations. + * The offset is inverted and used as the increment + * of {@link #bytesBackwardsOnSeek}. + */ + @Override public void seekBackwards(long negativeOffset) { - seekOperations++; - backwardSeekOperations++; - bytesBackwardsOnSeek -= negativeOffset; + seekOperations.incrementAndGet(); + backwardSeekOperations.incrementAndGet(); + bytesBackwardsOnSeek.addAndGet(-negativeOffset); } /** - * Record a forward seek, adding a seek operation, a forward - * seek operation, and any bytes skipped. - * @param skipped number of bytes skipped by reading from the stream. - * If the seek was implemented by a close + reopen, set this to zero. + * {@inheritDoc}. + * Increment the number of seek and forward seek + * operations, as well as counters of bytes skipped + * and bytes read in seek, where appropriate. + * Bytes read in seek are also added to the totalBytesRead + * counter. */ - public void seekForwards(long skipped) { - seekOperations++; - forwardSeekOperations++; + @Override + public void seekForwards(final long skipped, + long bytesReadInSeek) { + seekOperations.incrementAndGet(); + forwardSeekOperations.incrementAndGet(); if (skipped > 0) { - bytesSkippedOnSeek += skipped; + bytesSkippedOnSeek.addAndGet(skipped); + } + if (bytesReadInSeek > 0) { + bytesDiscardedOnSeek.addAndGet(bytesReadInSeek); + totalBytesRead.addAndGet(bytesReadInSeek); } } /** - * The inner stream was opened. - * @return the previous count + * {@inheritDoc}. + * Use {@code getAnIncrement()} on {@link #openOperations} + * so that on invocation 1 it returns 0. + * The caller will know that this is the first invocation. */ + @Override public long streamOpened() { - long count = openOperations; - openOperations++; - return count; + return openOperations.getAndIncrement(); } /** - * The inner stream was closed. - * @param abortedConnection flag to indicate the stream was aborted, - * rather than closed cleanly - * @param remainingInCurrentRequest the number of bytes remaining in - * the current request. + * {@inheritDoc}. + * If the connection was aborted, increment {@link #aborted} + * and add the byte's remaining count to {@link #bytesDiscardedInAbort}. + * If not aborted, increment {@link #closed} and + * then {@link #bytesDiscardedInClose} and {@link #totalBytesRead} + * with the bytes remaining value. */ + @Override public void streamClose(boolean abortedConnection, long remainingInCurrentRequest) { - closeOperations++; if (abortedConnection) { - this.aborted++; - bytesDiscardedInAbort += remainingInCurrentRequest; + // the connection was aborted. + // update the counter of abort() calls and bytes discarded + aborted.incrementAndGet(); + bytesDiscardedInAbort.addAndGet(remainingInCurrentRequest); } else { - closed++; - bytesReadInClose += remainingInCurrentRequest; + // connection closed, possibly draining the stream of surplus + // bytes. + closed.incrementAndGet(); + bytesDiscardedInClose.addAndGet(remainingInCurrentRequest); + totalBytesRead.addAndGet(remainingInCurrentRequest); } } /** - * An ignored stream read exception was received. + * {@inheritDoc}. */ + @Override public void readException() { - readExceptions++; + readExceptions.incrementAndGet(); } /** - * Increment the bytes read counter by the number of bytes; - * no-op if the argument is negative. 
- * @param bytes number of bytes read + * {@inheritDoc}. + * If the byte counter is positive, increment bytesRead and totalBytesRead. */ + @Override public void bytesRead(long bytes) { if (bytes > 0) { - bytesRead += bytes; + bytesRead.addAndGet(bytes); + totalBytesRead.addAndGet(bytes); } } - /** - * A {@code read(byte[] buf, int off, int len)} operation has started. - * @param pos starting position of the read - * @param len length of bytes to read - */ + @Override public void readOperationStarted(long pos, long len) { - readOperations++; + readOperations.incrementAndGet(); } - /** - * A {@code PositionedRead.read(position, buffer, offset, length)} - * operation has just started. - * @param pos starting position of the read - * @param len length of bytes to read - */ + @Override public void readFullyOperationStarted(long pos, long len) { - readFullyOperations++; + readFullyOperations.incrementAndGet(); } /** - * A read operation has completed. - * @param requested number of requested bytes - * @param actual the actual number of bytes + * {@inheritDoc}. + * If more data was requested than was actually returned, this + * was an incomplete read. Increment {@link #readsIncomplete}. */ + @Override public void readOperationCompleted(int requested, int actual) { if (requested > actual) { - readsIncomplete++; + readsIncomplete.incrementAndGet(); } } /** - * Close triggers the merge of statistics into the filesystem's + * {@code close()} merges the stream statistics into the filesystem's * instrumentation instance. */ @Override public void close() { + increment(StreamStatisticNames.STREAM_READ_CLOSE_OPERATIONS); merge(true); } /** - * The input policy has been switched. - * @param updatedPolicy enum value of new policy. + * {@inheritDoc}. + * As well as incrementing the {@code STREAM_READ_SEEK_POLICY_CHANGED} + * counter, the + * {@code STREAM_READ_GAUGE_INPUT_POLICY} gauge is set to the new value. + * */ + @Override public void inputPolicySet(int updatedPolicy) { - policySetCount++; - inputPolicy = updatedPolicy; + increment(StreamStatisticNames.STREAM_READ_SEEK_POLICY_CHANGED); + localIOStatistics().setGauge(STREAM_READ_GAUGE_INPUT_POLICY, + updatedPolicy); + } + + /** + * Get the inner class's IO Statistics. This is + * needed to avoid findbugs warnings about ambiguity. + * @return the Input Stream's statistics. + */ + private IOStatisticsStore localIOStatistics() { + return InputStreamStatistics.super.getIOStatistics(); } /** - * Get a reference to the version mismatch counter. - * @return a counter which can be incremented. + * The change tracker increments {@code versionMismatches} on any + * mismatch. + * @return change tracking. 
*/ - public AtomicLong getVersionMismatchCounter() { - return versionMismatches; + @Override + public ChangeTrackerStatistics getChangeTrackerStatistics() { + return new CountingChangeTracker( + localIOStatistics().getCounterReference( + StreamStatisticNames.STREAM_READ_VERSION_MISMATCHES)); } /** @@ -804,323 +1066,512 @@ public AtomicLong getVersionMismatchCounter() { public String toString() { final StringBuilder sb = new StringBuilder( "StreamStatistics{"); - sb.append("OpenOperations=").append(openOperations); - sb.append(", CloseOperations=").append(closeOperations); - sb.append(", Closed=").append(closed); - sb.append(", Aborted=").append(aborted); - sb.append(", SeekOperations=").append(seekOperations); - sb.append(", ReadExceptions=").append(readExceptions); - sb.append(", ForwardSeekOperations=") - .append(forwardSeekOperations); - sb.append(", BackwardSeekOperations=") - .append(backwardSeekOperations); - sb.append(", BytesSkippedOnSeek=").append(bytesSkippedOnSeek); - sb.append(", BytesBackwardsOnSeek=").append(bytesBackwardsOnSeek); - sb.append(", BytesRead=").append(bytesRead); - sb.append(", BytesRead excluding skipped=") - .append(bytesRead - bytesSkippedOnSeek); - sb.append(", ReadOperations=").append(readOperations); - sb.append(", ReadFullyOperations=").append(readFullyOperations); - sb.append(", ReadsIncomplete=").append(readsIncomplete); - sb.append(", BytesReadInClose=").append(bytesReadInClose); - sb.append(", BytesDiscardedInAbort=").append(bytesDiscardedInAbort); - sb.append(", InputPolicy=").append(inputPolicy); - sb.append(", InputPolicySetCount=").append(policySetCount); - sb.append(", versionMismatches=").append(versionMismatches.get()); + sb.append(IOStatisticsLogging.ioStatisticsToString( + localIOStatistics())); sb.append('}'); return sb.toString(); } + /** + * {@inheritDoc} + * Increment the counter {@code STREAM_READ_UNBUFFERED} + * and then merge the current set of statistics into the + * FileSystem's statistics through {@link #merge(boolean)}. + */ + @Override + public void unbuffered() { + increment(STREAM_READ_UNBUFFERED); + merge(false); + } + /** * Merge the statistics into the filesystem's instrumentation instance. - * Takes a diff between the current version of the stats and the - * version of the stats when merge was last called, and merges the diff - * into the instrumentation instance. Used to periodically merge the - * stats into the fs-wide stats. Behavior is undefined if called on a - * closed instance. + *
+ * <p>
+ * If the merge is invoked because the stream has been closed,
+ * then all statistics are merged, and the filesystem
+ * statistics of {@link #filesystemStatistics} updated
+ * with the bytes read values.
+ * <p>
+ * Whichever thread close()d the stream will have its counters
+ * updated.
+ * <p>
+ * If the merge is due to an unbuffer() call, the change in all
+ * counters since the last merge will be pushed to the Instrumentation's
+ * counters.
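+ * <p>
+ * In outline (a simplified sketch, not the exact code below):
+ * <pre>{@code
+ * // both paths: push counter deltas since the last merge to the metrics
+ * promoteInputStreamCountersToMetrics();
+ * // close() only: aggregate into the FS store and thread statistics
+ * instanceIOStatistics.aggregate(localIOStatistics());
+ * filesystemStatistics.incrementBytesRead(getTotalBytesRead());
+ * }</pre>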
    + * + * @param isClosed is this merge invoked because the stream is closed? */ - void merge(boolean isClosed) { - if (mergedStats != null) { - mergeInputStreamStatistics(diff(mergedStats)); - } else { - mergeInputStreamStatistics(this); - } - // If stats are closed, no need to create another copy - if (!isClosed) { - mergedStats = copy(); + private void merge(boolean isClosed) { + + IOStatisticsStore ioStatistics = localIOStatistics(); + LOG.debug("Merging statistics into FS statistics in {}: {}", + (isClosed ? "close()" : "unbuffer()"), + demandStringifyIOStatistics(ioStatistics)); + promoteInputStreamCountersToMetrics(); + mergedStats = snapshotIOStatistics(localIOStatistics()); + + if (isClosed) { + // stream is being closed. + // merge in all the IOStatistics + S3AInstrumentation.this.getIOStatistics().aggregate(ioStatistics); + + // increment the filesystem statistics for this thread. + if (filesystemStatistics != null) { + long t = getTotalBytesRead(); + filesystemStatistics.incrementBytesRead(t); + filesystemStatistics.incrementBytesReadByDistance(DISTANCE, t); + } } } /** - * Returns a diff between this {@link InputStreamStatistics} instance and - * the given {@link InputStreamStatistics} instance. + * Propagate a counter from the instance-level statistics + * to the S3A instrumentation, subtracting the previous merged value. + * @param name statistic to promote */ - private InputStreamStatistics diff(InputStreamStatistics inputStats) { - InputStreamStatistics diff = new InputStreamStatistics(); - diff.openOperations = openOperations - inputStats.openOperations; - diff.closeOperations = closeOperations - inputStats.closeOperations; - diff.closed = closed - inputStats.closed; - diff.aborted = aborted - inputStats.aborted; - diff.seekOperations = seekOperations - inputStats.seekOperations; - diff.readExceptions = readExceptions - inputStats.readExceptions; - diff.forwardSeekOperations = - forwardSeekOperations - inputStats.forwardSeekOperations; - diff.backwardSeekOperations = - backwardSeekOperations - inputStats.backwardSeekOperations; - diff.bytesRead = bytesRead - inputStats.bytesRead; - diff.bytesSkippedOnSeek = - bytesSkippedOnSeek - inputStats.bytesSkippedOnSeek; - diff.bytesBackwardsOnSeek = - bytesBackwardsOnSeek - inputStats.bytesBackwardsOnSeek; - diff.readOperations = readOperations - inputStats.readOperations; - diff.readFullyOperations = - readFullyOperations - inputStats.readFullyOperations; - diff.readsIncomplete = readsIncomplete - inputStats.readsIncomplete; - diff.bytesReadInClose = bytesReadInClose - inputStats.bytesReadInClose; - diff.bytesDiscardedInAbort = - bytesDiscardedInAbort - inputStats.bytesDiscardedInAbort; - diff.policySetCount = policySetCount - inputStats.policySetCount; - diff.inputPolicy = inputPolicy - inputStats.inputPolicy; - diff.versionMismatches.set(versionMismatches.longValue() - - inputStats.versionMismatches.longValue()); - return diff; + void promoteIOCounter(String name) { + incrementMutableCounter(name, + lookupCounterValue(name) + - mergedStats.counters().get(name)); } /** - * Returns a new {@link InputStreamStatistics} instance with all the same - * values as this {@link InputStreamStatistics}. + * Merge in the statistics of a single input stream into + * the filesystem-wide metrics counters. + * This does not update the FS IOStatistics values. 
*/ - private InputStreamStatistics copy() { - InputStreamStatistics copy = new InputStreamStatistics(); - copy.openOperations = openOperations; - copy.closeOperations = closeOperations; - copy.closed = closed; - copy.aborted = aborted; - copy.seekOperations = seekOperations; - copy.readExceptions = readExceptions; - copy.forwardSeekOperations = forwardSeekOperations; - copy.backwardSeekOperations = backwardSeekOperations; - copy.bytesRead = bytesRead; - copy.bytesSkippedOnSeek = bytesSkippedOnSeek; - copy.bytesBackwardsOnSeek = bytesBackwardsOnSeek; - copy.readOperations = readOperations; - copy.readFullyOperations = readFullyOperations; - copy.readsIncomplete = readsIncomplete; - copy.bytesReadInClose = bytesReadInClose; - copy.bytesDiscardedInAbort = bytesDiscardedInAbort; - copy.policySetCount = policySetCount; - copy.inputPolicy = inputPolicy; - return copy; + private void promoteInputStreamCountersToMetrics() { + // iterate through all the counters + localIOStatistics().counters() + .keySet().stream() + .forEach(e -> promoteIOCounter(e)); } + + @Override + public long getCloseOperations() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_CLOSE_OPERATIONS); + } + + @Override + public long getClosed() { + return lookupCounterValue(StreamStatisticNames.STREAM_READ_CLOSED); + } + + @Override + public long getAborted() { + return lookupCounterValue(StreamStatisticNames.STREAM_READ_ABORTED); + } + + @Override + public long getForwardSeekOperations() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS); + } + + @Override + public long getBackwardSeekOperations() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS); + } + + @Override + public long getBytesRead() { + return lookupCounterValue(StreamStatisticNames.STREAM_READ_BYTES); + } + + @Override + public long getTotalBytesRead() { + return lookupCounterValue(StreamStatisticNames.STREAM_READ_TOTAL_BYTES); + } + + @Override + public long getBytesSkippedOnSeek() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED); + } + + @Override + public long getBytesBackwardsOnSeek() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS); + } + + @Override + public long getBytesReadInClose() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_CLOSE); + } + + @Override + public long getBytesDiscardedInAbort() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_ABORT); + } + + @Override + public long getOpenOperations() { + return lookupCounterValue(StreamStatisticNames.STREAM_READ_OPENED); + } + + @Override + public long getSeekOperations() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS); + } + + @Override + public long getReadExceptions() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_EXCEPTIONS); + } + + @Override + public long getReadOperations() { + return lookupCounterValue(StreamStatisticNames.STREAM_READ_OPERATIONS); + } + + @Override + public long getReadFullyOperations() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_FULLY_OPERATIONS); + } + + @Override + public long getReadsIncomplete() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_OPERATIONS_INCOMPLETE); + } + + @Override + public long getPolicySetCount() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_SEEK_POLICY_CHANGED); + } + + @Override + public long 
getVersionMismatches() { + return lookupCounterValue( + StreamStatisticNames.STREAM_READ_VERSION_MISMATCHES); + } + + @Override + public long getInputPolicy() { + return localIOStatistics().gauges() + .get(STREAM_READ_GAUGE_INPUT_POLICY); + } + + @Override + public DurationTracker initiateGetRequest() { + return trackDuration(ACTION_HTTP_GET_REQUEST); + } + } /** * Create a stream output statistics instance. + * @param filesystemStatistics thread-local FS statistics. * @return the new instance */ - OutputStreamStatistics newOutputStreamStatistics(Statistics statistics) { - return new OutputStreamStatistics(statistics); + public BlockOutputStreamStatistics newOutputStreamStatistics( + FileSystem.Statistics filesystemStatistics) { + return new OutputStreamStatistics(filesystemStatistics); } /** * Merge in the statistics of a single output stream into * the filesystem-wide statistics. - * @param statistics stream statistics + * @param source stream statistics */ - private void mergeOutputStreamStatistics(OutputStreamStatistics statistics) { - incrementCounter(STREAM_WRITE_TOTAL_TIME, statistics.totalUploadDuration()); - incrementCounter(STREAM_WRITE_QUEUE_DURATION, statistics.queueDuration); - incrementCounter(STREAM_WRITE_TOTAL_DATA, statistics.bytesUploaded); + private void mergeOutputStreamStatistics( + OutputStreamStatistics source) { + incrementCounter(STREAM_WRITE_TOTAL_TIME, source.totalUploadDuration()); + incrementCounter(STREAM_WRITE_QUEUE_DURATION, source.queueDuration); + incrementCounter(STREAM_WRITE_TOTAL_DATA, source.bytesUploaded); incrementCounter(STREAM_WRITE_BLOCK_UPLOADS, - statistics.blockUploadsCompleted); + source.blockUploadsCompleted); + incrementCounter(STREAM_WRITE_EXCEPTIONS, + source.lookupCounterValue( + StreamStatisticNames.STREAM_WRITE_EXCEPTIONS)); + // merge in all the IOStatistics + this.getIOStatistics().aggregate(source.getIOStatistics()); } /** * Statistics updated by an output stream during its actual operation. - * Some of these stats may be relayed. However, as block upload is - * spans multiple + *
+ * <p>
+ * Some of these stats are propagated to any passed in
+ * {@link FileSystem.Statistics} instance; this is done
+ * in close() for better cross-thread accounting.
+ * <p>
+ * Some of the collected statistics are not directly served via
+ * IOStatistics.
+ * They are added to the instrumentation IOStatistics and metric counters
+ * during the {@link #mergeOutputStreamStatistics(OutputStreamStatistics)}
+ * operation.
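+ * <p>
+ * Illustrative sequence of calls for a single block upload, using the
+ * methods declared on {@code BlockOutputStreamStatistics}
+ * ({@code stats}, {@code timeInQueue}, {@code uploadDuration} and
+ * {@code blockSize} are example names only):
+ * <pre>{@code
+ * stats.blockUploadQueued(blockSize);                // pending gauges rise
+ * stats.blockUploadStarted(timeInQueue, blockSize);  // pending -> active
+ * stats.bytesTransferred(blockSize);                 // pending bytes fall
+ * stats.blockUploadCompleted(uploadDuration, blockSize);  // active falls
+ * }</pre>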
    */ - @InterfaceAudience.Private - @InterfaceStability.Unstable - public final class OutputStreamStatistics implements Closeable { - private final AtomicLong blocksSubmitted = new AtomicLong(0); - private final AtomicLong blocksInQueue = new AtomicLong(0); + private final class OutputStreamStatistics + extends AbstractS3AStatisticsSource + implements BlockOutputStreamStatistics { + private final AtomicLong blocksActive = new AtomicLong(0); private final AtomicLong blockUploadsCompleted = new AtomicLong(0); - private final AtomicLong blockUploadsFailed = new AtomicLong(0); - private final AtomicLong bytesPendingUpload = new AtomicLong(0); - private final AtomicLong bytesUploaded = new AtomicLong(0); + private final AtomicLong bytesWritten; + private final AtomicLong bytesUploaded; private final AtomicLong transferDuration = new AtomicLong(0); private final AtomicLong queueDuration = new AtomicLong(0); - private final AtomicLong exceptionsInMultipartFinalize = new AtomicLong(0); private final AtomicInteger blocksAllocated = new AtomicInteger(0); private final AtomicInteger blocksReleased = new AtomicInteger(0); - private Statistics statistics; + private final FileSystem.Statistics filesystemStatistics; - public OutputStreamStatistics(Statistics statistics){ - this.statistics = statistics; + /** + * Instantiate. + * @param filesystemStatistics FS Statistics to update in close(). + */ + private OutputStreamStatistics( + @Nullable FileSystem.Statistics filesystemStatistics) { + this.filesystemStatistics = filesystemStatistics; + IOStatisticsStore st = iostatisticsStore() + .withCounters( + STREAM_WRITE_BLOCK_UPLOADS.getSymbol(), + STREAM_WRITE_BYTES.getSymbol(), + STREAM_WRITE_EXCEPTIONS.getSymbol(), + STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS.getSymbol(), + STREAM_WRITE_QUEUE_DURATION.getSymbol(), + STREAM_WRITE_TOTAL_DATA.getSymbol(), + STREAM_WRITE_TOTAL_TIME.getSymbol()) + .withGauges( + STREAM_WRITE_BLOCK_UPLOADS_PENDING.getSymbol(), + STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING.getSymbol()) + .withDurationTracking( + ACTION_EXECUTOR_ACQUIRED, + INVOCATION_ABORT.getSymbol(), + OBJECT_MULTIPART_UPLOAD_ABORTED.getSymbol(), + MULTIPART_UPLOAD_COMPLETED.getSymbol()) + .build(); + setIOStatistics(st); + // these are extracted to avoid lookups on heavily used counters. + bytesUploaded = st.getCounterReference( + STREAM_WRITE_TOTAL_DATA.getSymbol()); + bytesWritten = st.getCounterReference( + StreamStatisticNames.STREAM_WRITE_BYTES); } /** - * A block has been allocated. + * Increment the Statistic gauge and the local IOStatistics + * equivalent. + * @param statistic statistic + * @param v value. + * @return local IOStatistic value */ - void blockAllocated() { + private long incAllGauges(Statistic statistic, long v) { + incrementGauge(statistic, v); + return incGauge(statistic.getSymbol(), v); + } + + @Override + public void blockAllocated() { blocksAllocated.incrementAndGet(); } + @Override + public void blockReleased() { + blocksReleased.incrementAndGet(); + } + /** - * A block has been released. + * {@inheritDoc} + * Increments the counter of block uplaods, and the gauges + * of block uploads pending (1) and the bytes pending (blockSize). 
*/ - void blockReleased() { - blocksReleased.incrementAndGet(); + @Override + public void blockUploadQueued(int blockSize) { + incCounter(StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS); + incAllGauges(STREAM_WRITE_BLOCK_UPLOADS_PENDING, 1); + incAllGauges(STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING, blockSize); + } + + /** + * {@inheritDoc} + * Update {@link #queueDuration} with queue duration, decrement + * {@code STREAM_WRITE_BLOCK_UPLOADS_PENDING} gauge and increment + * {@code STREAM_WRITE_BLOCK_UPLOADS_ACTIVE}. + */ + @Override + public void blockUploadStarted(Duration timeInQueue, int blockSize) { + // the local counter is used in toString reporting. + queueDuration.addAndGet(timeInQueue.toMillis()); + // update the duration fields in the IOStatistics. + localIOStatistics().addTimedOperation( + ACTION_EXECUTOR_ACQUIRED, + timeInQueue); + incAllGauges(STREAM_WRITE_BLOCK_UPLOADS_PENDING, -1); + incAllGauges(STREAM_WRITE_BLOCK_UPLOADS_ACTIVE, 1); } /** - * Block is queued for upload. + * Get the inner class's IO Statistics. This is + * needed to avoid findbugs warnings about ambiguity. + * @return the Input Stream's statistics. */ - void blockUploadQueued(int blockSize) { - blocksSubmitted.incrementAndGet(); - blocksInQueue.incrementAndGet(); - bytesPendingUpload.addAndGet(blockSize); - incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_PENDING, 1); - incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_DATA_PENDING, blockSize); - } - - /** Queued block has been scheduled for upload. */ - void blockUploadStarted(long duration, int blockSize) { - queueDuration.addAndGet(duration); - blocksInQueue.decrementAndGet(); - blocksActive.incrementAndGet(); - incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_PENDING, -1); - incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_ACTIVE, 1); - } - - /** A block upload has completed. */ - void blockUploadCompleted(long duration, int blockSize) { - this.transferDuration.addAndGet(duration); - incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_ACTIVE, -1); - blocksActive.decrementAndGet(); + private IOStatisticsStore localIOStatistics() { + return OutputStreamStatistics.super.getIOStatistics(); + } + + /** + * {@inheritDoc} + * Increment the transfer duration; decrement the + * {@code STREAM_WRITE_BLOCK_UPLOADS_ACTIVE} gauge. + */ + @Override + public void blockUploadCompleted( + Duration timeSinceUploadStarted, + int blockSize) { + transferDuration.addAndGet(timeSinceUploadStarted.toMillis()); + incAllGauges(STREAM_WRITE_BLOCK_UPLOADS_ACTIVE, -1); blockUploadsCompleted.incrementAndGet(); } /** * A block upload has failed. * A final transfer completed event is still expected, so this - * does not decrement the active block counter. + * does not decrement any gauges. */ - void blockUploadFailed(long duration, int blockSize) { - blockUploadsFailed.incrementAndGet(); + @Override + public void blockUploadFailed( + Duration timeSinceUploadStarted, + int blockSize) { + incCounter(StreamStatisticNames.STREAM_WRITE_EXCEPTIONS); } - /** Intermediate report of bytes uploaded. */ - void bytesTransferred(long byteCount) { + /** + * Intermediate report of bytes uploaded. 
+ * Increment counters of bytes upload, reduce the counter and + * gauge of pending bytes.; + * @param byteCount bytes uploaded + */ + @Override + public void bytesTransferred(long byteCount) { bytesUploaded.addAndGet(byteCount); - statistics.incrementBytesWritten(byteCount); - bytesPendingUpload.addAndGet(-byteCount); - incrementGauge(STREAM_WRITE_BLOCK_UPLOADS_DATA_PENDING, -byteCount); + incAllGauges(STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING, -byteCount); } - /** - * Note exception in a multipart complete. - * @param count count of exceptions - */ - void exceptionInMultipartComplete(int count) { + @Override + public void exceptionInMultipartComplete(int count) { if (count > 0) { - exceptionsInMultipartFinalize.addAndGet(count); + incCounter( + STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS.getSymbol(), + count); } } - /** - * Note an exception in a multipart abort. - */ - void exceptionInMultipartAbort() { - exceptionsInMultipartFinalize.incrementAndGet(); + @Override + public void exceptionInMultipartAbort() { + incCounter( + STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS.getSymbol()); } - /** - * Get the number of bytes pending upload. - * @return the number of bytes in the pending upload state. - */ + @Override public long getBytesPendingUpload() { - return bytesPendingUpload.get(); + return lookupGaugeValue( + STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING.getSymbol()); } - /** - * Data has been uploaded to be committed in a subsequent operation; - * to be called at the end of the write. - * @param size size in bytes - */ + @Override public void commitUploaded(long size) { incrementCounter(COMMITTER_BYTES_UPLOADED, size); } - /** - * Output stream has closed. - * Trigger merge in of all statistics not updated during operation. - */ @Override public void close() { - if (bytesPendingUpload.get() > 0) { + if (getBytesPendingUpload() > 0) { LOG.warn("Closing output stream statistics while data is still marked" + " as pending upload in {}", this); } mergeOutputStreamStatistics(this); + // and patch the FS statistics. + // provided the stream is closed in the worker thread, this will + // ensure that the thread-specific worker stats are updated. + if (filesystemStatistics != null) { + filesystemStatistics.incrementBytesWritten(bytesUploaded.get()); + } } - long averageQueueTime() { - return blocksSubmitted.get() > 0 ? - (queueDuration.get() / blocksSubmitted.get()) : 0; - } - - double effectiveBandwidth() { + /** + * What is the effective bandwidth of this stream's write. + * @return the bytes uploaded divided by the total duration. + */ + private double effectiveBandwidth() { double duration = totalUploadDuration() / 1000.0; return duration > 0 ? (bytesUploaded.get() / duration) : 0; } - long totalUploadDuration() { + /** + * Total of time spend uploading bytes. + * @return the transfer duration plus queue duration. + */ + private long totalUploadDuration() { return queueDuration.get() + transferDuration.get(); } - public int blocksAllocated() { + @Override + public int getBlocksAllocated() { return blocksAllocated.get(); } - public int blocksReleased() { + @Override + public int getBlocksReleased() { return blocksReleased.get(); } /** - * Get counters of blocks actively allocated; my be inaccurate + * Get counters of blocks actively allocated; may be inaccurate * if the numbers change during the (non-synchronized) calculation. * @return the number of actively allocated blocks. 
*/ - public int blocksActivelyAllocated() { + @Override + public int getBlocksActivelyAllocated() { return blocksAllocated.get() - blocksReleased.get(); } + /** + * Record bytes written. + * @param count number of bytes + */ + @Override + public void writeBytes(long count) { + bytesWritten.addAndGet(count); + } + + /** + * Get the current count of bytes written. + * @return the counter value. + */ + @Override + public long getBytesWritten() { + return bytesWritten.get(); + } @Override public String toString() { final StringBuilder sb = new StringBuilder( "OutputStreamStatistics{"); - sb.append("blocksSubmitted=").append(blocksSubmitted); - sb.append(", blocksInQueue=").append(blocksInQueue); + sb.append(localIOStatistics().toString()); sb.append(", blocksActive=").append(blocksActive); sb.append(", blockUploadsCompleted=").append(blockUploadsCompleted); - sb.append(", blockUploadsFailed=").append(blockUploadsFailed); - sb.append(", bytesPendingUpload=").append(bytesPendingUpload); - sb.append(", bytesUploaded=").append(bytesUploaded); sb.append(", blocksAllocated=").append(blocksAllocated); sb.append(", blocksReleased=").append(blocksReleased); - sb.append(", blocksActivelyAllocated=").append(blocksActivelyAllocated()); - sb.append(", exceptionsInMultipartFinalize=").append( - exceptionsInMultipartFinalize); + sb.append(", blocksActivelyAllocated=") + .append(getBlocksActivelyAllocated()); sb.append(", transferDuration=").append(transferDuration).append(" ms"); - sb.append(", queueDuration=").append(queueDuration).append(" ms"); - sb.append(", averageQueueTime=").append(averageQueueTime()).append(" ms"); sb.append(", totalUploadDuration=").append(totalUploadDuration()) .append(" ms"); sb.append(", effectiveBandwidth=").append(effectiveBandwidth()) @@ -1166,10 +1617,6 @@ public void recordsRead(int count) { incrementCounter(S3GUARD_METADATASTORE_RECORD_READS, count); } - /** - * records have been written (including deleted). - * @param count number of records written. - */ @Override public void recordsWritten(int count) { incrementCounter(S3GUARD_METADATASTORE_RECORD_WRITES, count); @@ -1177,7 +1624,8 @@ public void recordsWritten(int count) { @Override public void directoryMarkedAuthoritative() { - incrementCounter(S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED, + incrementCounter( + S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED, 1); } @@ -1192,60 +1640,95 @@ public void entryAdded(final long durationNanos) { } /** - * Instrumentation exported to S3Guard Committers. + * Instrumentation exported to S3A Committers. + * The S3AInstrumentation metrics and + * {@link #instanceIOStatistics} are updated continuously. 
*/ - @InterfaceAudience.Private - @InterfaceStability.Unstable - public final class CommitterStatistics { + private final class CommitterStatisticsImpl + extends AbstractS3AStatisticsSource + implements CommitterStatistics { + + private CommitterStatisticsImpl() { + IOStatisticsStore st = iostatisticsStore() + .withCounters( + COMMITTER_BYTES_COMMITTED.getSymbol(), + COMMITTER_BYTES_UPLOADED.getSymbol(), + COMMITTER_COMMITS_CREATED.getSymbol(), + COMMITTER_COMMITS_ABORTED.getSymbol(), + COMMITTER_COMMITS_COMPLETED.getSymbol(), + COMMITTER_COMMITS_FAILED.getSymbol(), + COMMITTER_COMMITS_REVERTED.getSymbol(), + COMMITTER_JOBS_FAILED.getSymbol(), + COMMITTER_JOBS_SUCCEEDED.getSymbol(), + COMMITTER_TASKS_FAILED.getSymbol(), + COMMITTER_TASKS_SUCCEEDED.getSymbol()) + .withDurationTracking( + COMMITTER_COMMIT_JOB.getSymbol(), + COMMITTER_MATERIALIZE_FILE.getSymbol(), + COMMITTER_STAGE_FILE_UPLOAD.getSymbol()) + .build(); + setIOStatistics(st); + } + + /** + * Increment both the local counter and the S3AInstrumentation counters. + * @param stat statistic + * @param value value + * @return the new value + */ + private long increment(Statistic stat, long value) { + incrementCounter(stat, value); + return incCounter(stat.getSymbol(), value); + } /** A commit has been created. */ + @Override public void commitCreated() { - incrementCounter(COMMITTER_COMMITS_CREATED, 1); + increment(COMMITTER_COMMITS_CREATED, 1); } - /** - * Data has been uploaded to be committed in a subsequent operation. - * @param size size in bytes - */ + @Override public void commitUploaded(long size) { - incrementCounter(COMMITTER_BYTES_UPLOADED, size); + increment(COMMITTER_BYTES_UPLOADED, size); } - /** - * A commit has been completed. - * @param size size in bytes - */ + @Override public void commitCompleted(long size) { - incrementCounter(COMMITTER_COMMITS_COMPLETED, 1); - incrementCounter(COMMITTER_BYTES_COMMITTED, size); + increment(COMMITTER_COMMITS_COMPLETED, 1); + increment(COMMITTER_BYTES_COMMITTED, size); } - /** A commit has been aborted. */ + @Override public void commitAborted() { - incrementCounter(COMMITTER_COMMITS_ABORTED, 1); + increment(COMMITTER_COMMITS_ABORTED, 1); } + @Override public void commitReverted() { - incrementCounter(COMMITTER_COMMITS_REVERTED, 1); + increment(COMMITTER_COMMITS_REVERTED, 1); } + @Override public void commitFailed() { - incrementCounter(COMMITTER_COMMITS_FAILED, 1); + increment(COMMITTER_COMMITS_FAILED, 1); } + @Override public void taskCompleted(boolean success) { - incrementCounter( - success ? COMMITTER_TASKS_SUCCEEDED + increment(success + ? COMMITTER_TASKS_SUCCEEDED : COMMITTER_TASKS_FAILED, 1); } + @Override public void jobCompleted(boolean success) { - incrementCounter( - success ? COMMITTER_JOBS_SUCCEEDED + increment(success + ? COMMITTER_JOBS_SUCCEEDED : COMMITTER_JOBS_FAILED, 1); } + } /** @@ -1253,26 +1736,33 @@ public void jobCompleted(boolean success) { * @return an instance of delegation token statistics */ public DelegationTokenStatistics newDelegationTokenStatistics() { - return new DelegationTokenStatistics(); + return new DelegationTokenStatisticsImpl(); } /** * Instrumentation exported to S3A Delegation Token support. + * The {@link #tokenIssued()} call is a no-op; + * This statistics class doesn't collect any local statistics. + * Instead it directly updates the S3A Instrumentation. 
*/ - @InterfaceAudience.Private - @InterfaceStability.Unstable - public final class DelegationTokenStatistics { + private final class DelegationTokenStatisticsImpl implements + DelegationTokenStatistics { - private DelegationTokenStatistics() { + private DelegationTokenStatisticsImpl() { } - /** A token has been issued. */ + @Override public void tokenIssued() { - incrementCounter(DELEGATION_TOKENS_ISSUED, 1); + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return getDurationTrackerFactory() + .trackDuration(key, count); } } - /** + /** * Copy all the metrics to a map of (name, long-value). * @return a map of the metrics */ @@ -1288,7 +1778,7 @@ public Map toMap() { private static class MetricsToMap extends MetricsRecordBuilder { private final MetricsCollector parent; private final Map map = - new HashMap<>(COUNTERS_TO_CREATE.length * 2); + new HashMap<>(); MetricsToMap(MetricsCollector parent) { this.parent = parent; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java index ac850895e6300..10295484fe815 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ALocatedFileStatus.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.LocatedFileStatus; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * {@link LocatedFileStatus} extended to also carry ETag and object version ID. diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java deleted file mode 100644 index cf58751ea446c..0000000000000 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AMultipartUploader.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.fs.s3a; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; -import com.amazonaws.services.s3.model.PartETag; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.services.s3.model.UploadPartResult; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; - -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.BBPartHandle; -import org.apache.hadoop.fs.BBUploadHandle; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.MultipartUploader; -import org.apache.hadoop.fs.MultipartUploaderFactory; -import org.apache.hadoop.fs.PartHandle; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathHandle; -import org.apache.hadoop.fs.UploadHandle; - -import static org.apache.hadoop.fs.s3a.Constants.FS_S3A; - -/** - * MultipartUploader for S3AFileSystem. This uses the S3 multipart - * upload mechanism. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class S3AMultipartUploader extends MultipartUploader { - - private final S3AFileSystem s3a; - - /** Header for Parts: {@value}. 
*/ - - public static final String HEADER = "S3A-part01"; - - public S3AMultipartUploader(FileSystem fs, Configuration conf) { - Preconditions.checkArgument(fs instanceof S3AFileSystem, - "Wrong filesystem: expected S3A but got %s", fs); - s3a = (S3AFileSystem) fs; - } - - @Override - public UploadHandle initialize(Path filePath) throws IOException { - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String key = s3a.pathToKey(filePath); - String uploadId = writeHelper.initiateMultiPartUpload(key); - return BBUploadHandle.from(ByteBuffer.wrap( - uploadId.getBytes(Charsets.UTF_8))); - } - - @Override - public PartHandle putPart(Path filePath, InputStream inputStream, - int partNumber, UploadHandle uploadId, long lengthInBytes) - throws IOException { - checkPutArguments(filePath, inputStream, partNumber, uploadId, - lengthInBytes); - byte[] uploadIdBytes = uploadId.toByteArray(); - checkUploadId(uploadIdBytes); - String key = s3a.pathToKey(filePath); - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); - UploadPartRequest request = writeHelper.newUploadPartRequest(key, - uploadIdString, partNumber, (int) lengthInBytes, inputStream, null, 0L); - UploadPartResult result = writeHelper.uploadPart(request); - String eTag = result.getETag(); - return BBPartHandle.from( - ByteBuffer.wrap( - buildPartHandlePayload(eTag, lengthInBytes))); - } - - @Override - public PathHandle complete(Path filePath, - Map handleMap, - UploadHandle uploadId) - throws IOException { - byte[] uploadIdBytes = uploadId.toByteArray(); - checkUploadId(uploadIdBytes); - - checkPartHandles(handleMap); - List> handles = - new ArrayList<>(handleMap.entrySet()); - handles.sort(Comparator.comparingInt(Map.Entry::getKey)); - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String key = s3a.pathToKey(filePath); - - String uploadIdStr = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); - ArrayList eTags = new ArrayList<>(); - eTags.ensureCapacity(handles.size()); - long totalLength = 0; - for (Map.Entry handle : handles) { - byte[] payload = handle.getValue().toByteArray(); - Pair result = parsePartHandlePayload(payload); - totalLength += result.getLeft(); - eTags.add(new PartETag(handle.getKey(), result.getRight())); - } - AtomicInteger errorCount = new AtomicInteger(0); - CompleteMultipartUploadResult result = writeHelper.completeMPUwithRetries( - key, uploadIdStr, eTags, totalLength, errorCount); - - byte[] eTag = result.getETag().getBytes(Charsets.UTF_8); - return (PathHandle) () -> ByteBuffer.wrap(eTag); - } - - @Override - public void abort(Path filePath, UploadHandle uploadId) throws IOException { - final byte[] uploadIdBytes = uploadId.toByteArray(); - checkUploadId(uploadIdBytes); - final WriteOperationHelper writeHelper = s3a.getWriteOperationHelper(); - String key = s3a.pathToKey(filePath); - String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); - writeHelper.abortMultipartCommit(key, uploadIdString); - } - - /** - * Factory for creating MultipartUploader objects for s3a:// FileSystems. 
- */ - public static class Factory extends MultipartUploaderFactory { - @Override - protected MultipartUploader createMultipartUploader(FileSystem fs, - Configuration conf) { - if (FS_S3A.equals(fs.getScheme())) { - return new S3AMultipartUploader(fs, conf); - } - return null; - } - } - - /** - * Build the payload for marshalling. - * @param eTag upload etag - * @param len length - * @return a byte array to marshall. - * @throws IOException error writing the payload - */ - @VisibleForTesting - static byte[] buildPartHandlePayload(String eTag, long len) - throws IOException { - Preconditions.checkArgument(StringUtils.isNotEmpty(eTag), - "Empty etag"); - Preconditions.checkArgument(len >= 0, - "Invalid length"); - - ByteArrayOutputStream bytes = new ByteArrayOutputStream(); - try(DataOutputStream output = new DataOutputStream(bytes)) { - output.writeUTF(HEADER); - output.writeLong(len); - output.writeUTF(eTag); - } - return bytes.toByteArray(); - } - - /** - * Parse the payload marshalled as a part handle. - * @param data handle data - * @return the length and etag - * @throws IOException error reading the payload - */ - @VisibleForTesting - static Pair parsePartHandlePayload(byte[] data) - throws IOException { - - try(DataInputStream input = - new DataInputStream(new ByteArrayInputStream(data))) { - final String header = input.readUTF(); - if (!HEADER.equals(header)) { - throw new IOException("Wrong header string: \"" + header + "\""); - } - final long len = input.readLong(); - final String etag = input.readUTF(); - if (len < 0) { - throw new IOException("Negative length"); - } - return Pair.of(len, etag); - } - } - -} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOpContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOpContext.java index 4e0aac5138eea..aca1fa5e1408f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOpContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOpContext.java @@ -20,23 +20,27 @@ import javax.annotation.Nullable; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.s3a.impl.ActiveOperationContext; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; /** - * Base class for operation context struct passed through codepaths for main + * Class for operation context struct passed through codepaths for main * S3AFileSystem operations. * Anything op-specific should be moved to a subclass of this. + * + * This was originally a base class, but {@link ActiveOperationContext} was + * created to be more minimal and cover many more operation type. */ -@SuppressWarnings("visibilitymodifier") // I want a struct of finals, for real. -public class S3AOpContext { +@SuppressWarnings("visibilitymodifier") +public class S3AOpContext extends ActiveOperationContext { final boolean isS3GuardEnabled; final Invoker invoker; @Nullable final FileSystem.Statistics stats; - final S3AInstrumentation instrumentation; @Nullable final Invoker s3guardInvoker; /** FileStatus for "destination" path being operated on. 
*/ @@ -53,9 +57,14 @@ public class S3AOpContext { * @param dstFileStatus file status from existence check */ public S3AOpContext(boolean isS3GuardEnabled, Invoker invoker, - Invoker s3guardInvoker, @Nullable FileSystem.Statistics stats, - S3AInstrumentation instrumentation, FileStatus dstFileStatus) { + @Nullable Invoker s3guardInvoker, + @Nullable FileSystem.Statistics stats, + S3AStatisticsContext instrumentation, + FileStatus dstFileStatus) { + super(newOperationId(), + instrumentation, + null); Preconditions.checkNotNull(invoker, "Null invoker arg"); Preconditions.checkNotNull(instrumentation, "Null instrumentation arg"); Preconditions.checkNotNull(dstFileStatus, "Null dstFileStatus arg"); @@ -65,7 +74,6 @@ public S3AOpContext(boolean isS3GuardEnabled, Invoker invoker, this.invoker = invoker; this.s3guardInvoker = s3guardInvoker; this.stats = stats; - this.instrumentation = instrumentation; this.dstFileStatus = dstFileStatus; } @@ -77,8 +85,10 @@ public S3AOpContext(boolean isS3GuardEnabled, Invoker invoker, * @param instrumentation instrumentation to use * @param dstFileStatus file status from existence check */ - public S3AOpContext(boolean isS3GuardEnabled, Invoker invoker, - @Nullable FileSystem.Statistics stats, S3AInstrumentation instrumentation, + public S3AOpContext(boolean isS3GuardEnabled, + Invoker invoker, + @Nullable FileSystem.Statistics stats, + S3AStatisticsContext instrumentation, FileStatus dstFileStatus) { this(isS3GuardEnabled, invoker, null, stats, instrumentation, dstFileStatus); @@ -97,10 +107,6 @@ public FileSystem.Statistics getStats() { return stats; } - public S3AInstrumentation getInstrumentation() { - return instrumentation; - } - @Nullable public Invoker getS3guardInvoker() { return s3guardInvoker;
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java index a7317c945127c..3729341dbfe27 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java @@ -22,12 +22,13 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import javax.annotation.Nullable; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Read-specific operation context struct. @@ -60,8 +61,8 @@ public class S3AReadOpContext extends S3AOpContext { * @param isS3GuardEnabled true iff S3Guard is enabled. * @param invoker invoker for normal retries. * @param s3guardInvoker S3Guard-specific retry invoker. - * @param stats statistics (may be null) - * @param instrumentation FS instrumentation + * @param stats Filesystem statistics (may be null) + * @param instrumentation statistics context * @param dstFileStatus target file status * @param inputPolicy the input policy * @param readahead readahead for GET operations/skip, etc.
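The hunks above remove the S3AInstrumentation field from the operation context and pass an S3AStatisticsContext down to the new ActiveOperationContext superclass; the hunk below makes the matching change to the S3AReadOpContext constructor, which forwards these arguments to super(). As a minimal sketch of the new wiring, a caller might build a context as follows. This is illustrative only and not part of the patch: the EmptyS3AStatisticsContext no-op implementation and the Invoker/S3ARetryPolicy construction are assumptions, not taken from these hunks.

// Illustrative sketch: constructing an S3AOpContext with the new
// S3AStatisticsContext parameter (assumed helper classes noted above).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.s3a.Invoker;
import org.apache.hadoop.fs.s3a.S3AOpContext;
import org.apache.hadoop.fs.s3a.S3ARetryPolicy;
import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext;
import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext;

public class OpContextSketch {
  public static S3AOpContext newContext(Configuration conf, FileStatus dstStatus) {
    // retry invoker built from the standard S3A retry policy; NO_OP retry callback
    Invoker invoker = new Invoker(new S3ARetryPolicy(conf), Invoker.NO_OP);
    // no-op statistics context (assumed implementation of S3AStatisticsContext)
    S3AStatisticsContext statistics = new EmptyS3AStatisticsContext();
    return new S3AOpContext(
        false,        // S3Guard disabled
        invoker,      // retry invoker
        null,         // no S3Guard-specific invoker
        null,         // FileSystem.Statistics may be null
        statistics,   // statistics context replaces S3AInstrumentation
        dstStatus);   // status of the path being operated on
  }
}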
@@ -71,13 +72,14 @@ public S3AReadOpContext( final Path path, boolean isS3GuardEnabled, Invoker invoker, - Invoker s3guardInvoker, + @Nullable Invoker s3guardInvoker, @Nullable FileSystem.Statistics stats, - S3AInstrumentation instrumentation, + S3AStatisticsContext instrumentation, FileStatus dstFileStatus, S3AInputPolicy inputPolicy, ChangeDetectionPolicy changeDetectionPolicy, final long readahead) { + super(isS3GuardEnabled, invoker, s3guardInvoker, stats, instrumentation, dstFileStatus); this.path = checkNotNull(path); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java index d2954b3a92045..286dc213b2187 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java @@ -32,7 +32,7 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputExceededException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AStorageStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AStorageStatistics.java index 4b126673b555f..33ae09119960e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AStorageStatistics.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AStorageStatistics.java @@ -20,106 +20,27 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.StorageStatistics; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.impl.StorageStatisticsFromIOStatistics; -import java.util.Collections; -import java.util.EnumMap; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.emptyStatistics; /** - * Storage statistics for S3A. + * Storage statistics for S3A, dynamically generated from the IOStatistics. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class S3AStorageStatistics extends StorageStatistics - implements Iterable { - private static final Logger LOG = - LoggerFactory.getLogger(S3AStorageStatistics.class); +public class S3AStorageStatistics + extends StorageStatisticsFromIOStatistics { public static final String NAME = "S3AStorageStatistics"; - private final Map opsCount = - new EnumMap<>(Statistic.class); - public S3AStorageStatistics() { - super(NAME); - for (Statistic opType : Statistic.values()) { - opsCount.put(opType, new AtomicLong(0)); - } - } - - /** - * Increment a specific counter. 
- * @param op operation - * @param count increment value - * @return the new value - */ - public long incrementCounter(Statistic op, long count) { - long updated = opsCount.get(op).addAndGet(count); - LOG.debug("{} += {} -> {}", op, count, updated); - return updated; - } - - private class LongIterator implements Iterator { - private Iterator> iterator = - Collections.unmodifiableSet(opsCount.entrySet()).iterator(); - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public LongStatistic next() { - if (!iterator.hasNext()) { - throw new NoSuchElementException(); - } - final Map.Entry entry = iterator.next(); - return new LongStatistic(entry.getKey().getSymbol(), - entry.getValue().get()); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - } - - @Override - public String getScheme() { - return "s3a"; + public S3AStorageStatistics(final IOStatistics ioStatistics) { + super(NAME, "s3a", ioStatistics); } - @Override - public Iterator getLongStatistics() { - return new LongIterator(); - } - - @Override - public Iterator iterator() { - return getLongStatistics(); - } - - @Override - public Long getLong(String key) { - final Statistic type = Statistic.fromSymbol(key); - return type == null ? null : opsCount.get(type).get(); - } - - @Override - public boolean isTracked(String key) { - return Statistic.fromSymbol(key) != null; - } - - @Override - public void reset() { - for (AtomicLong value : opsCount.values()) { - value.set(0); - } + public S3AStorageStatistics() { + super(NAME, "s3a", emptyStatistics()); } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java index 1d399505f5823..314f13f0352e9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java @@ -35,18 +35,20 @@ import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.util.functional.RemoteIterators; import org.apache.hadoop.fs.s3a.auth.IAMInstanceCredentialsProvider; import org.apache.hadoop.fs.s3a.impl.NetworkBinding; import org.apache.hadoop.fs.s3native.S3xLoginHelper; @@ -54,7 +56,7 @@ import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.util.VersionInfo; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1416,6 +1418,30 @@ private static void initUserAgent(Configuration conf, awsConf.setUserAgentPrefix(userAgent); } + /** + * Convert the data of an iterator of {@link S3AFileStatus} to + * 
an array. Given tombstones are filtered out. If the iterator + * does return any item, an empty array is returned. + * @param iterator a non-null iterator + * @param tombstones + * @return a possibly-empty array of file status entries + * @throws IOException + */ + public static S3AFileStatus[] iteratorToStatuses( + RemoteIterator iterator, Set tombstones) + throws IOException { + List statuses = new ArrayList<>(); + + while (iterator.hasNext()) { + S3AFileStatus status = iterator.next(); + if (!tombstones.contains(status.getPath())) { + statuses.add(status); + } + } + + return statuses.toArray(new S3AFileStatus[0]); + } + /** * An interface for use in lambda-expressions working with * directory tree listings. @@ -1445,12 +1471,7 @@ public interface LocatedFileStatusMap { public static long applyLocatedFiles( RemoteIterator iterator, CallOnLocatedFileStatus eval) throws IOException { - long count = 0; - while (iterator.hasNext()) { - count++; - eval.call(iterator.next()); - } - return count; + return RemoteIterators.foreach(iterator, eval::call); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java index e0a1d780ccf5f..dbb39fb662408 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ClientFactory.java @@ -18,35 +18,246 @@ package org.apache.hadoop.fs.s3a; +import javax.annotation.Nullable; import java.io.IOException; import java.net.URI; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.handlers.RequestHandler2; +import com.amazonaws.monitoring.MonitoringListener; import com.amazonaws.services.s3.AmazonS3; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; + +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_ENDPOINT; /** * Factory for creation of {@link AmazonS3} client instances. + * Important: HBase's HBoss module implements this interface in its + * tests. + * Take care when updating this interface to ensure that a client + * implementing only the deprecated method will work. + * See https://github.com/apache/hbase-filesystem + * */ -@InterfaceAudience.Private -@InterfaceStability.Unstable +@InterfaceAudience.LimitedPrivate("HBoss") +@InterfaceStability.Evolving public interface S3ClientFactory { /** * Creates a new {@link AmazonS3} client. * - * @param name raw input S3A file system URI - * @param bucket Optional bucket to use to look up per-bucket proxy secrets - * @param credentialSet credentials to use - * @param userAgentSuffix optional suffix for the UA field. + * @param uri S3A file system URI + * @param parameters parameter object * @return S3 client * @throws IOException IO problem */ - AmazonS3 createS3Client(URI name, - String bucket, - AWSCredentialsProvider credentialSet, - String userAgentSuffix) throws IOException; + AmazonS3 createS3Client(URI uri, + S3ClientCreationParameters parameters) throws IOException; + + /** + * Settings for the S3 Client. + * Implemented as a class to pass in so that adding + * new parameters does not break the binding of + * external implementations of the factory. + */ + final class S3ClientCreationParameters { + + /** + * Credentials. 
+ */ + private AWSCredentialsProvider credentialSet; + + /** + * Endpoint. + */ + private String endpoint = DEFAULT_ENDPOINT; + + /** + * Custom Headers. + */ + private final Map headers = new HashMap<>(); + + /** + * Monitoring listener. + */ + private MonitoringListener monitoringListener; + + /** + * RequestMetricCollector metrics...if not-null will be wrapped + * with an {@code AwsStatisticsCollector} and passed to + * the client. + */ + private StatisticsFromAwsSdk metrics; + + /** + * Use (deprecated) path style access. + */ + private boolean pathStyleAccess; + + /** + * This is in the settings awaiting wiring up and testing. + */ + private boolean requesterPays; + + /** + * Request handlers; used for auditing, X-Ray etc. + */ + private List requestHandlers; + + /** + * Suffix to UA. + */ + private String userAgentSuffix = ""; + + public List getRequestHandlers() { + return requestHandlers; + } + + /** + * List of request handlers. + * @param handlers handler list. + * @return this object + */ + public S3ClientCreationParameters withRequestHandlers( + @Nullable final List handlers) { + requestHandlers = handlers; + return this; + } + + public MonitoringListener getMonitoringListener() { + return monitoringListener; + } + + /** + * listener for AWS monitoring events. + * @param listener listener + * @return this object + */ + public S3ClientCreationParameters withMonitoringListener( + @Nullable final MonitoringListener listener) { + monitoringListener = listener; + return this; + } + + public StatisticsFromAwsSdk getMetrics() { + return metrics; + } + + /** + * Metrics binding. This is the S3A-level + * statistics interface, which will be wired + * up to the AWS callbacks. + * @param statistics statistics implementation + * @return this object + */ + public S3ClientCreationParameters withMetrics( + @Nullable final StatisticsFromAwsSdk statistics) { + metrics = statistics; + return this; + } + + /** + * Requester pays option. Not yet wired up. + * @param value new value + * @return the builder + */ + public S3ClientCreationParameters withRequesterPays( + final boolean value) { + requesterPays = value; + return this; + } + + public boolean isRequesterPays() { + return requesterPays; + } + + public AWSCredentialsProvider getCredentialSet() { + return credentialSet; + } + + /** + * Set credentials. + * @param value new value + * @return the builder + */ + + public S3ClientCreationParameters withCredentialSet( + final AWSCredentialsProvider value) { + credentialSet = value; + return this; + } + + public String getUserAgentSuffix() { + return userAgentSuffix; + } + + /** + * Set UA suffix. + * @param value new value + * @return the builder + */ + + public S3ClientCreationParameters withUserAgentSuffix( + final String value) { + userAgentSuffix = value; + return this; + } + + public String getEndpoint() { + return endpoint; + } + + /** + * Set endpoint. + * @param value new value + * @return the builder + */ + + public S3ClientCreationParameters withEndpoint( + final String value) { + endpoint = value; + return this; + } + + public boolean isPathStyleAccess() { + return pathStyleAccess; + } + + /** + * Set path access option. + * @param value new value + * @return the builder + */ + public S3ClientCreationParameters withPathStyleAccess( + final boolean value) { + pathStyleAccess = value; + return this; + } + + /** + * Add a custom header. 
+ * @param header header name + * @param value new value + * @return the builder + */ + public S3ClientCreationParameters withHeader( + String header, String value) { + headers.put(header, value); + return this; + } + /** + * Get the map of headers. + * @return (mutable) header map + */ + public Map getHeaders() { + return headers; + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListRequest.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListRequest.java index 1a0d2c3378ca6..d51211516f251 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListRequest.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListRequest.java @@ -24,11 +24,18 @@ /** * API version-independent container for S3 List requests. */ -public class S3ListRequest { - private ListObjectsRequest v1Request; - private ListObjectsV2Request v2Request; +public final class S3ListRequest { - protected S3ListRequest(ListObjectsRequest v1, ListObjectsV2Request v2) { + /** + * Format for the toString() method: {@value}. + */ + private static final String DESCRIPTION + = "List %s:/%s delimiter=%s keys=%d requester pays=%s"; + + private final ListObjectsRequest v1Request; + private final ListObjectsV2Request v2Request; + + private S3ListRequest(ListObjectsRequest v1, ListObjectsV2Request v2) { v1Request = v1; v2Request = v2; } @@ -70,11 +77,15 @@ public ListObjectsV2Request getV2() { @Override public String toString() { if (isV1()) { - return String.format("List %s:/%s", - v1Request.getBucketName(), v1Request.getPrefix()); + return String.format(DESCRIPTION, + v1Request.getBucketName(), v1Request.getPrefix(), + v1Request.getDelimiter(), v1Request.getMaxKeys(), + v1Request.isRequesterPays()); } else { - return String.format("List %s:/%s", - v2Request.getBucketName(), v2Request.getPrefix()); + return String.format(DESCRIPTION, + v2Request.getBucketName(), v2Request.getPrefix(), + v2Request.getDelimiter(), v2Request.getMaxKeys(), + v2Request.isRequesterPays()); } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListResult.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListResult.java index e8aff329070ef..69794c04db53c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListResult.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ListResult.java @@ -18,11 +18,18 @@ package org.apache.hadoop.fs.s3a; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + import com.amazonaws.services.s3.model.ListObjectsV2Result; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; +import org.slf4j.Logger; -import java.util.List; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.impl.ContextAccessors; /** * API version-independent container for S3 List responses. @@ -92,6 +99,110 @@ public List getCommonPrefixes() { } else { return v2Result.getCommonPrefixes(); } + } + + /** + * Is the list of object summaries empty + * after accounting for tombstone markers (if provided)? + * @param accessors callback for key to path mapping. + * @param tombstones Set of tombstone markers, or null if not applicable. + * @return false if summaries contains objects not accounted for by + * tombstones. 
+ */ + public boolean isEmptyOfObjects( + final ContextAccessors accessors, + final Set tombstones) { + if (tombstones == null) { + return getObjectSummaries().isEmpty(); + } + return isEmptyOfKeys(accessors, + objectSummaryKeys(), + tombstones); + } + + /** + * Get the list of keys in the object summary. + * @return a possibly empty list + */ + private List objectSummaryKeys() { + return getObjectSummaries().stream() + .map(S3ObjectSummary::getKey) + .collect(Collectors.toList()); + } + + /** + * Does this listing have prefixes or objects after entries with + * tombstones have been stripped? + * @param accessors callback for key to path mapping. + * @param tombstones Set of tombstone markers, or null if not applicable. + * @return true if the reconciled list is non-empty + */ + public boolean hasPrefixesOrObjects( + final ContextAccessors accessors, + final Set tombstones) { + + return !isEmptyOfKeys(accessors, getCommonPrefixes(), tombstones) + || !isEmptyOfObjects(accessors, tombstones); + } + + /** + * Helper function to determine if a collection of keys is empty + * after accounting for tombstone markers (if provided). + * @param accessors callback for key to path mapping. + * @param keys Collection of path (prefixes / directories or keys). + * @param tombstones Set of tombstone markers, or null if not applicable. + * @return true if the list is considered empty. + */ + public boolean isEmptyOfKeys( + final ContextAccessors accessors, + final Collection keys, + final Set tombstones) { + if (tombstones == null) { + return keys.isEmpty(); + } + for (String key : keys) { + Path qualified = accessors.keyToPath(key); + if (!tombstones.contains(qualified)) { + return false; + } + } + return true; + } + /** + * Does this listing represent an empty directory? + * @param contextAccessors callback for key to path mapping. + * @param dirKey directory key + * @param tombstones Set of tombstone markers, or null if not applicable. + * @return true if the list is considered empty. + */ + public boolean representsEmptyDirectory( + final ContextAccessors contextAccessors, + final String dirKey, + final Set tombstones) { + // If looking for an empty directory, the marker must exist but + // no children. + // So the listing must contain the marker entry only as an object, + // and prefixes is null + List keys = objectSummaryKeys(); + return keys.size() == 1 && keys.contains(dirKey) + && getCommonPrefixes().isEmpty(); + } + + /** + * Dump the result at debug level.
+ * @param log log to use + */ + public void logAtDebug(Logger log) { + Collection prefixes = getCommonPrefixes(); + Collection summaries = getObjectSummaries(); + log.debug("Prefix count = {}; object count={}", + prefixes.size(), summaries.size()); + for (S3ObjectSummary summary : summaries) { + log.debug("Summary: {} {}", summary.getKey(), summary.getSize()); + } + for (String prefix : prefixes) { + log.debug("Prefix: {}", prefix); + } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java index 255d0095f80c4..ee689e58b7ef0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java @@ -21,7 +21,7 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java index 1d3d4758028c6..1a53f0d1f8797 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java @@ -18,224 +18,527 @@ package org.apache.hadoop.fs.s3a; -import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames; - import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; + +import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_COUNTER; +import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_DURATION; +import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_GAUGE; +import static org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum.TYPE_QUANTILE; + /** * Statistic which are collected in S3A. - * These statistics are available at a low level in {@link S3AStorageStatistics} - * and as metrics in {@link S3AInstrumentation} + * Counter and duration statistics are published in + * {@link S3AFileSystem#getStorageStatistics()}. + * and as metrics in {@link S3AInstrumentation}. + *

    + * Where possible, stream names come from {@link StreamStatisticNames} + * and {@link StoreStatisticNames} + *

    */ +@InterfaceStability.Unstable public enum Statistic { + /* Low-level duration counters */ + ACTION_EXECUTOR_ACQUIRED( + StoreStatisticNames.ACTION_EXECUTOR_ACQUIRED, + "Executor acquired.", + TYPE_DURATION), + ACTION_HTTP_HEAD_REQUEST( + StoreStatisticNames.ACTION_HTTP_HEAD_REQUEST, + "HEAD request.", + TYPE_DURATION), + ACTION_HTTP_GET_REQUEST( + StoreStatisticNames.ACTION_HTTP_GET_REQUEST, + "GET request.", + TYPE_DURATION), + + /* FileSystem Level statistics */ DIRECTORIES_CREATED("directories_created", - "Total number of directories created through the object store."), + "Total number of directories created through the object store.", + TYPE_COUNTER), DIRECTORIES_DELETED("directories_deleted", - "Total number of directories deleted through the object store."), + "Total number of directories deleted through the object store.", + TYPE_COUNTER), FILES_COPIED("files_copied", - "Total number of files copied within the object store."), + "Total number of files copied within the object store.", + TYPE_COUNTER), FILES_COPIED_BYTES("files_copied_bytes", - "Total number of bytes copied within the object store."), + "Total number of bytes copied within the object store.", + TYPE_COUNTER), FILES_CREATED("files_created", - "Total number of files created through the object store."), + "Total number of files created through the object store.", + TYPE_COUNTER), FILES_DELETED("files_deleted", - "Total number of files deleted from the object store."), + "Total number of files deleted from the object store.", + TYPE_COUNTER), FILES_DELETE_REJECTED("files_delete_rejected", - "Total number of files whose delete request was rejected"), + "Total number of files whose delete request was rejected", + TYPE_COUNTER), FAKE_DIRECTORIES_CREATED("fake_directories_created", - "Total number of fake directory entries created in the object store."), + "Total number of fake directory entries created in the object store.", + TYPE_COUNTER), FAKE_DIRECTORIES_DELETED("fake_directories_deleted", - "Total number of fake directory deletes submitted to object store."), - IGNORED_ERRORS("ignored_errors", "Errors caught and ignored"), - INVOCATION_COPY_FROM_LOCAL_FILE(CommonStatisticNames.OP_COPY_FROM_LOCAL_FILE, - "Calls of copyFromLocalFile()"), - INVOCATION_CREATE(CommonStatisticNames.OP_CREATE, - "Calls of create()"), - INVOCATION_CREATE_NON_RECURSIVE(CommonStatisticNames.OP_CREATE_NON_RECURSIVE, - "Calls of createNonRecursive()"), - INVOCATION_DELETE(CommonStatisticNames.OP_DELETE, - "Calls of delete()"), - INVOCATION_EXISTS(CommonStatisticNames.OP_EXISTS, - "Calls of exists()"), - INVOCATION_GET_DELEGATION_TOKEN(CommonStatisticNames.OP_GET_DELEGATION_TOKEN, - "Calls of getDelegationToken()"), - INVOCATION_GET_FILE_CHECKSUM(CommonStatisticNames.OP_GET_FILE_CHECKSUM, - "Calls of getFileChecksum()"), - INVOCATION_GET_FILE_STATUS(CommonStatisticNames.OP_GET_FILE_STATUS, - "Calls of getFileStatus()"), - INVOCATION_GLOB_STATUS(CommonStatisticNames.OP_GLOB_STATUS, - "Calls of globStatus()"), - INVOCATION_IS_DIRECTORY(CommonStatisticNames.OP_IS_DIRECTORY, - "Calls of isDirectory()"), - INVOCATION_IS_FILE(CommonStatisticNames.OP_IS_FILE, - "Calls of isFile()"), - INVOCATION_LIST_FILES(CommonStatisticNames.OP_LIST_FILES, - "Calls of listFiles()"), - INVOCATION_LIST_LOCATED_STATUS(CommonStatisticNames.OP_LIST_LOCATED_STATUS, - "Calls of listLocatedStatus()"), - INVOCATION_LIST_STATUS(CommonStatisticNames.OP_LIST_STATUS, - "Calls of listStatus()"), - INVOCATION_MKDIRS(CommonStatisticNames.OP_MKDIRS, - "Calls of mkdirs()"), - 
INVOCATION_OPEN(CommonStatisticNames.OP_OPEN, - "Calls of open()"), - INVOCATION_RENAME(CommonStatisticNames.OP_RENAME, - "Calls of rename()"), - OBJECT_COPY_REQUESTS("object_copy_requests", "Object copy requests"), - OBJECT_DELETE_REQUESTS("object_delete_requests", "Object delete requests"), - OBJECT_LIST_REQUESTS("object_list_requests", - "Number of object listings made"), - OBJECT_CONTINUE_LIST_REQUESTS("object_continue_list_requests", - "Number of continued object listings made"), - OBJECT_METADATA_REQUESTS("object_metadata_requests", - "Number of requests for object metadata"), - OBJECT_MULTIPART_UPLOAD_INITIATED("object_multipart_initiated", - "Object multipart upload initiated"), - OBJECT_MULTIPART_UPLOAD_ABORTED("object_multipart_aborted", - "Object multipart upload aborted"), - OBJECT_PUT_REQUESTS("object_put_requests", - "Object put/multipart upload count"), - OBJECT_PUT_REQUESTS_COMPLETED("object_put_requests_completed", - "Object put/multipart upload completed count"), - OBJECT_PUT_REQUESTS_ACTIVE("object_put_requests_active", - "Current number of active put requests"), - OBJECT_PUT_BYTES("object_put_bytes", "number of bytes uploaded"), - OBJECT_PUT_BYTES_PENDING("object_put_bytes_pending", - "number of bytes queued for upload/being actively uploaded"), - OBJECT_SELECT_REQUESTS("object_select_requests", - "Count of S3 Select requests issued"), - STREAM_ABORTED("stream_aborted", - "Count of times the TCP stream was aborted"), - STREAM_BACKWARD_SEEK_OPERATIONS("stream_backward_seek_operations", - "Number of executed seek operations which went backwards in a stream"), - STREAM_CLOSED("stream_closed", "Count of times the TCP stream was closed"), - STREAM_CLOSE_OPERATIONS("stream_close_operations", - "Total count of times an attempt to close a data stream was made"), - STREAM_FORWARD_SEEK_OPERATIONS("stream_forward_seek_operations", - "Number of executed seek operations which went forward in a stream"), - STREAM_OPENED("stream_opened", - "Total count of times an input stream to object store was opened"), - STREAM_READ_EXCEPTIONS("stream_read_exceptions", - "Number of exceptions invoked on input streams"), - STREAM_READ_FULLY_OPERATIONS("stream_read_fully_operations", - "Count of readFully() operations in streams"), - STREAM_READ_OPERATIONS("stream_read_operations", - "Count of read() operations in streams"), - STREAM_READ_OPERATIONS_INCOMPLETE("stream_read_operations_incomplete", - "Count of incomplete read() operations in streams"), - STREAM_READ_VERSION_MISMATCHES("stream_read_version_mismatches", - "Count of version mismatches encountered while reading streams"), - STREAM_SEEK_BYTES_BACKWARDS("stream_bytes_backwards_on_seek", - "Count of bytes moved backwards during seek operations"), - STREAM_SEEK_BYTES_READ("stream_bytes_read", - "Count of bytes read during seek() in stream operations"), - STREAM_SEEK_BYTES_SKIPPED("stream_bytes_skipped_on_seek", - "Count of bytes skipped during forward seek operation"), - STREAM_SEEK_OPERATIONS("stream_seek_operations", - "Number of seek operations during stream IO."), - STREAM_CLOSE_BYTES_READ("stream_bytes_read_in_close", - "Count of bytes read when closing streams during seek operations."), - STREAM_ABORT_BYTES_DISCARDED("stream_bytes_discarded_in_abort", - "Count of bytes discarded by aborting the stream"), - STREAM_WRITE_FAILURES("stream_write_failures", - "Count of stream write failures reported"), - STREAM_WRITE_BLOCK_UPLOADS("stream_write_block_uploads", - "Count of block/partition uploads completed"), - 
STREAM_WRITE_BLOCK_UPLOADS_ACTIVE("stream_write_block_uploads_active", - "Count of block/partition uploads completed"), - STREAM_WRITE_BLOCK_UPLOADS_COMMITTED("stream_write_block_uploads_committed", - "Count of number of block uploads committed"), - STREAM_WRITE_BLOCK_UPLOADS_ABORTED("stream_write_block_uploads_aborted", - "Count of number of block uploads aborted"), - - STREAM_WRITE_BLOCK_UPLOADS_PENDING("stream_write_block_uploads_pending", - "Gauge of block/partitions uploads queued to be written"), - STREAM_WRITE_BLOCK_UPLOADS_DATA_PENDING( - "stream_write_block_uploads_data_pending", - "Gauge of block/partitions data uploads queued to be written"), - STREAM_WRITE_TOTAL_TIME("stream_write_total_time", - "Count of total time taken for uploads to complete"), - STREAM_WRITE_TOTAL_DATA("stream_write_total_data", - "Count of total data uploaded in block output"), - STREAM_WRITE_QUEUE_DURATION("stream_write_queue_duration", - "Total queue duration of all block uploads"), - - // S3guard committer stats + "Total number of fake directory deletes submitted to object store.", + TYPE_COUNTER), + IGNORED_ERRORS("ignored_errors", "Errors caught and ignored", + TYPE_COUNTER), + + INVOCATION_ABORT( + StoreStatisticNames.OP_ABORT, + "Calls of abort()", + TYPE_DURATION), + INVOCATION_COPY_FROM_LOCAL_FILE( + StoreStatisticNames.OP_COPY_FROM_LOCAL_FILE, + "Calls of copyFromLocalFile()", + TYPE_COUNTER), + INVOCATION_CREATE( + StoreStatisticNames.OP_CREATE, + "Calls of create()", + TYPE_COUNTER), + INVOCATION_CREATE_NON_RECURSIVE( + StoreStatisticNames.OP_CREATE_NON_RECURSIVE, + "Calls of createNonRecursive()", + TYPE_COUNTER), + INVOCATION_DELETE( + StoreStatisticNames.OP_DELETE, + "Calls of delete()", + TYPE_COUNTER), + INVOCATION_EXISTS( + StoreStatisticNames.OP_EXISTS, + "Calls of exists()", + TYPE_COUNTER), + INVOCATION_GET_DELEGATION_TOKEN( + StoreStatisticNames.OP_GET_DELEGATION_TOKEN, + "Calls of getDelegationToken()", + TYPE_COUNTER), + INVOCATION_GET_FILE_CHECKSUM( + StoreStatisticNames.OP_GET_FILE_CHECKSUM, + "Calls of getFileChecksum()", + TYPE_COUNTER), + INVOCATION_GET_FILE_STATUS( + StoreStatisticNames.OP_GET_FILE_STATUS, + "Calls of getFileStatus()", + TYPE_COUNTER), + INVOCATION_GLOB_STATUS( + StoreStatisticNames.OP_GLOB_STATUS, + "Calls of globStatus()", + TYPE_COUNTER), + INVOCATION_IS_DIRECTORY( + StoreStatisticNames.OP_IS_DIRECTORY, + "Calls of isDirectory()", + TYPE_COUNTER), + INVOCATION_IS_FILE( + StoreStatisticNames.OP_IS_FILE, + "Calls of isFile()", + TYPE_COUNTER), + INVOCATION_LIST_FILES( + StoreStatisticNames.OP_LIST_FILES, + "Calls of listFiles()", + TYPE_COUNTER), + INVOCATION_LIST_LOCATED_STATUS( + StoreStatisticNames.OP_LIST_LOCATED_STATUS, + "Calls of listLocatedStatus()", + TYPE_COUNTER), + INVOCATION_LIST_STATUS( + StoreStatisticNames.OP_LIST_STATUS, + "Calls of listStatus()", + TYPE_COUNTER), + INVOCATION_MKDIRS( + StoreStatisticNames.OP_MKDIRS, + "Calls of mkdirs()", + TYPE_COUNTER), + INVOCATION_OPEN( + StoreStatisticNames.OP_OPEN, + "Calls of open()", + TYPE_COUNTER), + INVOCATION_RENAME( + StoreStatisticNames.OP_RENAME, + "Calls of rename()", + TYPE_COUNTER), + + /* The XAttr API metrics are all durations */ + INVOCATION_XATTR_GET_MAP( + StoreStatisticNames.OP_XATTR_GET_MAP, + "Calls of getXAttrs(Path path)", + TYPE_DURATION), + INVOCATION_XATTR_GET_NAMED( + StoreStatisticNames.OP_XATTR_GET_NAMED, + "Calls of getXAttr(Path, String)", + TYPE_DURATION), + INVOCATION_XATTR_GET_NAMED_MAP( + StoreStatisticNames.OP_XATTR_GET_NAMED_MAP, + "Calls of xattr()", + 
TYPE_DURATION), + INVOCATION_OP_XATTR_LIST( + StoreStatisticNames.OP_XATTR_LIST, + "Calls of getXAttrs(Path path, List names)", + TYPE_DURATION), + + /* Object IO */ + OBJECT_COPY_REQUESTS(StoreStatisticNames.OBJECT_COPY_REQUESTS, + "Object copy requests", + TYPE_COUNTER), + OBJECT_DELETE_REQUEST(StoreStatisticNames.OBJECT_DELETE_REQUEST, + "Object delete requests", + TYPE_DURATION), + OBJECT_BULK_DELETE_REQUEST(StoreStatisticNames.OBJECT_BULK_DELETE_REQUEST, + "Object bulk delete requests", + TYPE_DURATION), + OBJECT_DELETE_OBJECTS(StoreStatisticNames.OBJECT_DELETE_OBJECTS, + "Objects deleted in delete requests", + TYPE_COUNTER), + OBJECT_LIST_REQUEST(StoreStatisticNames.OBJECT_LIST_REQUEST, + "Count of object listings made", + TYPE_DURATION), + OBJECT_CONTINUE_LIST_REQUESTS( + StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST, + "Count of continued object listings made", + TYPE_DURATION), + OBJECT_METADATA_REQUESTS( + StoreStatisticNames.OBJECT_METADATA_REQUESTS, + "Count of requests for object metadata", + TYPE_COUNTER), + OBJECT_MULTIPART_UPLOAD_INITIATED( + StoreStatisticNames.OBJECT_MULTIPART_UPLOAD_INITIATED, + "Object multipart upload initiated", + TYPE_COUNTER), + OBJECT_MULTIPART_UPLOAD_ABORTED( + StoreStatisticNames.OBJECT_MULTIPART_UPLOAD_ABORTED, + "Object multipart upload aborted", + TYPE_COUNTER), + OBJECT_PUT_REQUESTS( + StoreStatisticNames.OBJECT_PUT_REQUEST, + "Object put/multipart upload count", + TYPE_COUNTER), + OBJECT_PUT_REQUESTS_COMPLETED( + StoreStatisticNames.OBJECT_PUT_REQUEST_COMPLETED, + "Object put/multipart upload completed count", + TYPE_COUNTER), + OBJECT_PUT_REQUESTS_ACTIVE( + StoreStatisticNames.OBJECT_PUT_REQUEST_ACTIVE, + "Current number of active put requests", + TYPE_GAUGE), + OBJECT_PUT_BYTES( + StoreStatisticNames.OBJECT_PUT_BYTES, + "number of bytes uploaded", + TYPE_COUNTER), + OBJECT_PUT_BYTES_PENDING( + StoreStatisticNames.OBJECT_PUT_BYTES_PENDING, + "number of bytes queued for upload/being actively uploaded", + TYPE_GAUGE), + OBJECT_SELECT_REQUESTS( + StoreStatisticNames.OBJECT_SELECT_REQUESTS, + "Count of S3 Select requests issued", + TYPE_COUNTER), + STREAM_READ_ABORTED( + StreamStatisticNames.STREAM_READ_ABORTED, + "Count of times the TCP stream was aborted", + TYPE_COUNTER), + + /* Stream Reads */ + STREAM_READ_BYTES( + StreamStatisticNames.STREAM_READ_BYTES, + "Bytes read from an input stream in read() calls", + TYPE_COUNTER), + STREAM_READ_BYTES_DISCARDED_ABORT( + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_ABORT, + "Count of bytes discarded by aborting an input stream", + TYPE_COUNTER), + STREAM_READ_BYTES_READ_CLOSE( + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_CLOSE, + "Count of bytes read and discarded when closing an input stream", + TYPE_COUNTER), + STREAM_READ_CLOSED( + StreamStatisticNames.STREAM_READ_CLOSED, + "Count of times the TCP stream was closed", + TYPE_COUNTER), + STREAM_READ_CLOSE_OPERATIONS( + StreamStatisticNames.STREAM_READ_CLOSE_OPERATIONS, + "Total count of times an attempt to close an input stream was made", + TYPE_COUNTER), + STREAM_READ_EXCEPTIONS( + StreamStatisticNames.STREAM_READ_EXCEPTIONS, + "Count of exceptions raised during input stream reads", + TYPE_COUNTER), + STREAM_READ_FULLY_OPERATIONS( + StreamStatisticNames.STREAM_READ_FULLY_OPERATIONS, + "Count of readFully() operations in an input stream", + TYPE_COUNTER), + STREAM_READ_OPENED( + StreamStatisticNames.STREAM_READ_OPENED, + "Total count of times an input stream to object store data was opened", + TYPE_COUNTER), + STREAM_READ_OPERATIONS( + 
StreamStatisticNames.STREAM_READ_OPERATIONS, + "Count of read() operations in an input stream", + TYPE_COUNTER), + STREAM_READ_OPERATIONS_INCOMPLETE( + StreamStatisticNames.STREAM_READ_OPERATIONS_INCOMPLETE, + "Count of incomplete read() operations in an input stream", + TYPE_COUNTER), + STREAM_READ_VERSION_MISMATCHES( + StreamStatisticNames.STREAM_READ_VERSION_MISMATCHES, + "Count of version mismatches encountered while reading an input stream", + TYPE_COUNTER), + STREAM_READ_SEEK_BACKWARD_OPERATIONS( + StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS, + "Count of executed seek operations which went backwards in a stream", + TYPE_COUNTER), + STREAM_READ_SEEK_BYTES_BACKWARDS( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, + "Count of bytes moved backwards during seek operations" + + " in an input stream", + TYPE_COUNTER), + STREAM_READ_SEEK_BYTES_DISCARDED( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_DISCARDED, + "Count of bytes read and discarded during seek() in an input stream", + TYPE_COUNTER), + STREAM_READ_SEEK_BYTES_SKIPPED( + StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED, + "Count of bytes skipped during forward seek operations" + + " an input stream", + TYPE_COUNTER), + STREAM_READ_SEEK_FORWARD_OPERATIONS( + StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS, + "Count of executed seek operations which went forward in" + + " an input stream", + TYPE_COUNTER), + STREAM_READ_SEEK_OPERATIONS( + StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS, + "Count of seek operations in an input stream", + TYPE_COUNTER), + STREAM_READ_SEEK_POLICY_CHANGED( + StreamStatisticNames.STREAM_READ_SEEK_POLICY_CHANGED, + "Count of times the seek policy was dynamically changed" + + " in an input stream", + TYPE_COUNTER), + STREAM_READ_TOTAL_BYTES( + StreamStatisticNames.STREAM_READ_TOTAL_BYTES, + "Total count of bytes read from an input stream", + TYPE_COUNTER), + + /* Stream Write statistics */ + + STREAM_WRITE_EXCEPTIONS( + StreamStatisticNames.STREAM_WRITE_EXCEPTIONS, + "Count of stream write failures reported", + TYPE_COUNTER), + STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS( + StreamStatisticNames.STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS, + "Count of failures when finalizing a multipart upload", + TYPE_COUNTER), + STREAM_WRITE_BLOCK_UPLOADS( + StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS, + "Count of block/partition uploads completed", + TYPE_COUNTER), + STREAM_WRITE_BLOCK_UPLOADS_ACTIVE( + StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS_ACTIVE, + "Count of block/partition uploads active", + TYPE_GAUGE), + STREAM_WRITE_BLOCK_UPLOADS_COMMITTED( + StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS_COMMITTED, + "Count of number of block uploads committed", + TYPE_COUNTER), + STREAM_WRITE_BLOCK_UPLOADS_ABORTED( + StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS_ABORTED, + "Count of number of block uploads aborted", + TYPE_COUNTER), + + STREAM_WRITE_BLOCK_UPLOADS_PENDING( + StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS_PENDING, + "Gauge of block/partitions uploads queued to be written", + TYPE_GAUGE), + STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING( + StreamStatisticNames.STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING, + "Gauge of data queued to be written", + TYPE_GAUGE), + STREAM_WRITE_TOTAL_TIME( + StreamStatisticNames.STREAM_WRITE_TOTAL_TIME, + "Count of total time taken for uploads to complete", + TYPE_COUNTER), + STREAM_WRITE_TOTAL_DATA(StreamStatisticNames.STREAM_WRITE_TOTAL_DATA, + "Count of total data uploaded", + TYPE_COUNTER), + STREAM_WRITE_BYTES( + 
StreamStatisticNames.STREAM_WRITE_BYTES, + "Count of bytes written to output stream" + + " (including all not yet uploaded)", + TYPE_COUNTER), + STREAM_WRITE_QUEUE_DURATION( + StreamStatisticNames.STREAM_WRITE_QUEUE_DURATION, + "Total queue duration of all block uploads", + TYPE_DURATION), + + /* committer stats */ COMMITTER_COMMITS_CREATED( "committer_commits_created", - "Number of files to commit created"), + "Count of files to commit created", + TYPE_COUNTER), COMMITTER_COMMITS_COMPLETED( "committer_commits_completed", - "Number of files committed"), + "Count of files committed", + TYPE_COUNTER), + COMMITTER_COMMIT_JOB( + "committer_commit_job", + "Duration Tracking of time to commit an entire job", + TYPE_DURATION), COMMITTER_JOBS_SUCCEEDED( "committer_jobs_completed", - "Number of successful jobs"), + "Count of successful jobs", + TYPE_COUNTER), COMMITTER_JOBS_FAILED( "committer_jobs_failed", - "Number of failed jobs"), + "Count of failed jobs", + TYPE_COUNTER), COMMITTER_TASKS_SUCCEEDED( "committer_tasks_completed", - "Number of successful tasks"), + "Count of successful tasks", + TYPE_COUNTER), COMMITTER_TASKS_FAILED( "committer_tasks_failed", - "Number of failed tasks"), + "Count of failed tasks", + TYPE_COUNTER), COMMITTER_BYTES_COMMITTED( "committer_bytes_committed", - "Amount of data committed"), + "Amount of data committed", + TYPE_COUNTER), COMMITTER_BYTES_UPLOADED( "committer_bytes_uploaded", - "Number of bytes uploaded duing commit operations"), + "Count of bytes uploaded duing commit operations", + TYPE_COUNTER), COMMITTER_COMMITS_FAILED( - "committer_commits_failed", - "Number of commits failed"), + "committer_commits"+ StoreStatisticNames.SUFFIX_FAILURES, + "Count of commits failed", + TYPE_COUNTER), COMMITTER_COMMITS_ABORTED( "committer_commits_aborted", - "Number of commits aborted"), + "Count of commits aborted", + TYPE_COUNTER), COMMITTER_COMMITS_REVERTED( "committer_commits_reverted", - "Number of commits reverted"), + "Count of commits reverted", + TYPE_COUNTER), COMMITTER_MAGIC_FILES_CREATED( "committer_magic_files_created", - "Number of files created under 'magic' paths"), + "Count of files created under 'magic' paths", + TYPE_COUNTER), + COMMITTER_MATERIALIZE_FILE( + "committer_materialize_file", + "Duration Tracking of time to materialize a file in job commit", + TYPE_DURATION), + COMMITTER_STAGE_FILE_UPLOAD( + "committer_stage_file_upload", + "Duration Tracking of files uploaded from a local staging path", + TYPE_DURATION), - // S3guard stats + /* S3guard stats */ S3GUARD_METADATASTORE_PUT_PATH_REQUEST( "s3guard_metadatastore_put_path_request", - "S3Guard metadata store put one metadata path request"), + "S3Guard metadata store put one metadata path request", + TYPE_COUNTER), S3GUARD_METADATASTORE_PUT_PATH_LATENCY( "s3guard_metadatastore_put_path_latency", - "S3Guard metadata store put one metadata path latency"), - S3GUARD_METADATASTORE_INITIALIZATION("s3guard_metadatastore_initialization", - "S3Guard metadata store initialization times"), + "S3Guard metadata store put one metadata path latency", + TYPE_QUANTILE), + S3GUARD_METADATASTORE_INITIALIZATION( + "s3guard_metadatastore_initialization", + "S3Guard metadata store initialization times", + TYPE_COUNTER), S3GUARD_METADATASTORE_RECORD_DELETES( "s3guard_metadatastore_record_deletes", - "S3Guard metadata store records deleted"), + "S3Guard metadata store records deleted", + TYPE_COUNTER), S3GUARD_METADATASTORE_RECORD_READS( "s3guard_metadatastore_record_reads", - "S3Guard metadata store records read"), + 
"S3Guard metadata store records read", + TYPE_COUNTER), S3GUARD_METADATASTORE_RECORD_WRITES( "s3guard_metadatastore_record_writes", - "S3Guard metadata store records written"), + "S3Guard metadata store records written", + TYPE_COUNTER), S3GUARD_METADATASTORE_RETRY("s3guard_metadatastore_retry", - "S3Guard metadata store retry events"), + "S3Guard metadata store retry events", + TYPE_COUNTER), S3GUARD_METADATASTORE_THROTTLED("s3guard_metadatastore_throttled", - "S3Guard metadata store throttled events"), + "S3Guard metadata store throttled events", + TYPE_COUNTER), S3GUARD_METADATASTORE_THROTTLE_RATE( "s3guard_metadatastore_throttle_rate", - "S3Guard metadata store throttle rate"), + "S3Guard metadata store throttle rate", + TYPE_QUANTILE), S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED( "s3guard_metadatastore_authoritative_directories_updated", - "S3Guard metadata store authoritative directories updated from S3"), + "S3Guard metadata store authoritative directories updated from S3", + TYPE_COUNTER), + + + /* General Store operations */ + STORE_IO_REQUEST(StoreStatisticNames.STORE_IO_REQUEST, + "requests made of the remote store", + TYPE_COUNTER), + + STORE_IO_RETRY(StoreStatisticNames.STORE_IO_RETRY, + "retried requests made of the remote store", + TYPE_COUNTER), - STORE_IO_THROTTLED("store_io_throttled", "Requests throttled and retried"), - STORE_IO_THROTTLE_RATE("store_io_throttle_rate", - "Rate of S3 request throttling"), + STORE_IO_THROTTLED( + StoreStatisticNames.STORE_IO_THROTTLED, + "Requests throttled and retried", + TYPE_COUNTER), + STORE_IO_THROTTLE_RATE( + StoreStatisticNames.STORE_IO_THROTTLE_RATE, + "Rate of S3 request throttling", + TYPE_QUANTILE), + + /* + * Delegation Token Operations. + */ + DELEGATION_TOKENS_ISSUED( + StoreStatisticNames.DELEGATION_TOKENS_ISSUED, + "Count of delegation tokens issued", + TYPE_DURATION), + + /* Multipart Upload API */ + + MULTIPART_UPLOAD_INSTANTIATED( + StoreStatisticNames.MULTIPART_UPLOAD_INSTANTIATED, + "Multipart Uploader Instantiated", + TYPE_COUNTER), + MULTIPART_UPLOAD_PART_PUT( + StoreStatisticNames.MULTIPART_UPLOAD_PART_PUT, + "Multipart Part Put Operation", + TYPE_COUNTER), + MULTIPART_UPLOAD_PART_PUT_BYTES( + StoreStatisticNames.MULTIPART_UPLOAD_PART_PUT_BYTES, + "Multipart Part Put Bytes", + TYPE_COUNTER), + MULTIPART_UPLOAD_ABORTED( + StoreStatisticNames.MULTIPART_UPLOAD_ABORTED, + "Multipart Upload Aborted", + TYPE_COUNTER), + MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED( + StoreStatisticNames.MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED, + "Multipart Upload Abort Unner Path Invoked", + TYPE_COUNTER), + MULTIPART_UPLOAD_COMPLETED( + StoreStatisticNames.MULTIPART_UPLOAD_COMPLETED, + "Multipart Upload Completed", + TYPE_COUNTER), + MULTIPART_UPLOAD_STARTED( + StoreStatisticNames.MULTIPART_UPLOAD_STARTED, + "Multipart Upload Started", + TYPE_COUNTER); - DELEGATION_TOKENS_ISSUED("delegation_tokens_issued", - "Number of delegation tokens issued"); + /** + * A map used to support the {@link #fromSymbol(String)} call. + */ private static final Map SYMBOL_MAP = new HashMap<>(Statistic.values().length); static { @@ -244,14 +547,28 @@ public enum Statistic { } } - Statistic(String symbol, String description) { + + /** + * Statistic definition. + * @param symbol name + * @param description description. + * @param type type + */ + Statistic(String symbol, String description, StatisticTypeEnum type) { this.symbol = symbol; this.description = description; + this.type = type; } + /** Statistic name. 
*/ private final String symbol; + + /** Statistic description. */ private final String description; + /** Statistic type. */ + private final StatisticTypeEnum type; + public String getSymbol() { return symbol; } @@ -278,4 +595,12 @@ public String getDescription() { public String toString() { return symbol; } + + /** + * What type is this statistic? + * @return the type. + */ + public StatisticTypeEnum getType() { + return type; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java index 602732b6d3250..9bdf61c22a1ff 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java @@ -25,6 +25,7 @@ import java.io.InputStream; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import com.amazonaws.services.s3.model.AmazonS3Exception; @@ -41,7 +42,7 @@ import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.services.s3.transfer.model.UploadResult; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,14 +50,20 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.s3guard.S3Guard; import org.apache.hadoop.fs.s3a.select.SelectBinding; import org.apache.hadoop.util.DurationInfo; +import org.apache.hadoop.util.functional.CallableRaisingIOE; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Invoker.*; +import static org.apache.hadoop.fs.s3a.S3AUtils.longOption; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.UPLOAD_PART_COUNT_LIMIT; /** * Helper for low-level operations against an S3 Bucket for writing data, @@ -83,7 +90,7 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class WriteOperationHelper { +public class WriteOperationHelper implements WriteOperations { private static final Logger LOG = LoggerFactory.getLogger(WriteOperationHelper.class); @@ -104,17 +111,26 @@ public class WriteOperationHelper { /** Bucket of the owner FS. */ private final String bucket; + /** + * statistics context. + */ + private final S3AStatisticsContext statisticsContext; + /** * Constructor. 
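Each Statistic entry above now carries a symbol, a description and a StatisticTypeEnum value (TYPE_COUNTER, TYPE_GAUGE, TYPE_DURATION or TYPE_QUANTILE), and fromSymbol(String) resolves a name through a map built once from values(). A minimal, self-contained sketch of that layout, using invented names rather than the Hadoop classes:

    import java.util.HashMap;
    import java.util.Map;

    /** Statistic kinds mirroring the TYPE_* values used above (illustrative only). */
    enum DemoType { COUNTER, GAUGE, DURATION, QUANTILE }

    /** Illustrative enum with the same symbol/description/type layout and symbol lookup. */
    enum DemoStatistic {
      OBJECT_PUT_REQUESTS("object_put_requests", "Object put/multipart upload count", DemoType.COUNTER),
      OBJECT_PUT_BYTES_PENDING("object_put_bytes_pending", "Bytes queued for upload", DemoType.GAUGE),
      OBJECT_LIST_REQUEST("object_list_request", "Count of object listings made", DemoType.DURATION);

      // Lookup map built once, after all constants have been constructed.
      private static final Map<String, DemoStatistic> SYMBOL_MAP = new HashMap<>();
      static {
        for (DemoStatistic s : values()) {
          SYMBOL_MAP.put(s.symbol, s);
        }
      }

      private final String symbol;
      private final String description;
      private final DemoType type;

      DemoStatistic(String symbol, String description, DemoType type) {
        this.symbol = symbol;
        this.description = description;
        this.type = type;
      }

      /** Resolve a statistic from its symbol, or null if unknown. */
      static DemoStatistic fromSymbol(String symbol) {
        return SYMBOL_MAP.get(symbol);
      }

      String getSymbol() { return symbol; }
      String getDescription() { return description; }
      DemoType getType() { return type; }

      public static void main(String[] args) {
        System.out.println(fromSymbol("object_list_request").getType());  // DURATION
      }
    }

Building the lookup map in a static initializer matters because enum constructors may not reference the enum's own static fields; the patch's SYMBOL_MAP static block follows the same structure.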
* @param owner owner FS creating the helper * @param conf Configuration object + * @param statisticsContext statistics context * */ - protected WriteOperationHelper(S3AFileSystem owner, Configuration conf) { + protected WriteOperationHelper(S3AFileSystem owner, + Configuration conf, + S3AStatisticsContext statisticsContext) { this.owner = owner; this.invoker = new Invoker(new S3ARetryPolicy(conf), this::operationRetried); this.conf = conf; + this.statisticsContext = statisticsContext; bucket = owner.getBucket(); } @@ -127,24 +143,26 @@ protected WriteOperationHelper(S3AFileSystem owner, Configuration conf) { */ void operationRetried(String text, Exception ex, int retries, boolean idempotent) { + LOG.info("{}: Retried {}: {}", text, retries, ex.toString()); + LOG.debug("Stack", ex); owner.operationRetried(text, ex, retries, idempotent); } /** * Execute a function with retry processing. + * @param type of return value * @param action action to execute (used in error messages) * @param path path of work (used in error messages) * @param idempotent does the operation have semantics * which mean that it can be retried even if was already executed? * @param operation operation to execute - * @param type of return value * @return the result of the call * @throws IOException any IOE raised, or translated exception */ public T retry(String action, String path, boolean idempotent, - Invoker.Operation operation) + CallableRaisingIOE operation) throws IOException { return invoker.retry(action, path, idempotent, operation); @@ -155,12 +173,19 @@ public T retry(String action, * @param destKey destination key * @param inputStream source data. * @param length size, if known. Use -1 for not known + * @param headers optional map of custom headers. * @return the request */ public PutObjectRequest createPutObjectRequest(String destKey, - InputStream inputStream, long length) { + InputStream inputStream, + long length, + final Map headers) { + ObjectMetadata objectMetadata = newObjectMetadata(length); + if (headers != null) { + objectMetadata.setUserMetadata(headers); + } return owner.newPutObjectRequest(destKey, - newObjectMetadata(length), + objectMetadata, inputStream); } @@ -250,11 +275,11 @@ private CompleteMultipartUploadResult finalizeMultipartUpload( Retried retrying, @Nullable BulkOperationState operationState) throws IOException { if (partETags.isEmpty()) { - throw new IOException( - "No upload parts in multipart upload to " + destKey); + throw new PathIOException(destKey, + "No upload parts in multipart upload"); } CompleteMultipartUploadResult uploadResult = - invoker.retry("Completing multipart commit", destKey, + invoker.retry("Completing multipart upload", destKey, true, retrying, () -> { @@ -311,19 +336,32 @@ public CompleteMultipartUploadResult completeMPUwithRetries( * Abort a multipart upload operation. * @param destKey destination key of the upload * @param uploadId multipart operation Id + * @param shouldRetry should failures trigger a retry? 
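The retry(...) signature above now accepts a general callable that may raise an IOException instead of the narrower Invoker.Operation, so any IOException-throwing lambda can be wrapped in the helper's retry handling. A simplified, self-contained stand-in for that shape (this is not Hadoop's Invoker; the retry policy here is a plain loop for illustration):

    import java.io.IOException;

    // Simplified stand-in for the retry pattern: not Hadoop's Invoker, just the shape.
    final class RetryDemo {

      /** Functional interface mirroring a callable that may raise an IOException. */
      @FunctionalInterface
      interface IOCall<T> {
        T apply() throws IOException;
      }

      /**
       * Retry an idempotent operation a fixed number of times.
       * Real code would consult a retry policy and translate exceptions.
       */
      static <T> T retry(String action, String path, boolean idempotent, IOCall<T> operation)
          throws IOException {
        int attempts = idempotent ? 3 : 1;
        IOException last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            return operation.apply();
          } catch (IOException e) {
            last = e;  // a real implementation would log the action and path here
          }
        }
        throw last;
      }

      public static void main(String[] args) throws IOException {
        // Usage: any IOException-raising lambda can be passed straight in.
        String etag = retry("HEAD object", "s3a://bucket/key", true, () -> "etag-123");
        System.out.println(etag);
      }
    }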
* @param retrying callback invoked on every retry * @throws IOException failure to abort * @throws FileNotFoundException if the abort ID is unknown */ @Retries.RetryTranslated public void abortMultipartUpload(String destKey, String uploadId, - Retried retrying) + boolean shouldRetry, Retried retrying) throws IOException { - invoker.retry("Aborting multipart upload", destKey, true, - retrying, - () -> owner.abortMultipartUpload( - destKey, - uploadId)); + if (shouldRetry) { + // retrying option + invoker.retry("Aborting multipart upload ID " + uploadId, + destKey, + true, + retrying, + () -> owner.abortMultipartUpload( + destKey, + uploadId)); + } else { + // single pass attempt. + once("Aborting multipart upload ID " + uploadId, + destKey, + () -> owner.abortMultipartUpload( + destKey, + uploadId)); + } } /** @@ -374,7 +412,7 @@ public int abortMultipartUploadsUnderPath(String prefix) @Retries.RetryTranslated public void abortMultipartCommit(String destKey, String uploadId) throws IOException { - abortMultipartUpload(destKey, uploadId, invoker.getRetryCallback()); + abortMultipartUpload(destKey, uploadId, true, invoker.getRetryCallback()); } /** @@ -384,6 +422,7 @@ public void abortMultipartCommit(String destKey, String uploadId) * A subset of the file may be posted, by providing the starting point * in {@code offset} and a length of block in {@code size} equal to * or less than the remaining bytes. + * The part number must be less than 10000. * @param destKey destination key of ongoing operation * @param uploadId ID of ongoing upload * @param partNumber current part number of the upload @@ -392,6 +431,8 @@ public void abortMultipartCommit(String destKey, String uploadId) * @param sourceFile optional source file. * @param offset offset in file to start reading. * @return the request. + * @throws IllegalArgumentException if the parameters are invalid -including + * @throws PathIOException if the part number is out of range. */ public UploadPartRequest newUploadPartRequest( String destKey, @@ -400,18 +441,32 @@ public UploadPartRequest newUploadPartRequest( int size, InputStream uploadStream, File sourceFile, - Long offset) { + Long offset) throws PathIOException { checkNotNull(uploadId); // exactly one source must be set; xor verifies this checkArgument((uploadStream != null) ^ (sourceFile != null), "Data source"); checkArgument(size >= 0, "Invalid partition size %s", size); - checkArgument(partNumber > 0 && partNumber <= 10000, - "partNumber must be between 1 and 10000 inclusive, but is %s", - partNumber); + checkArgument(partNumber > 0, + "partNumber must be between 1 and %s inclusive, but is %s", + DEFAULT_UPLOAD_PART_COUNT_LIMIT, partNumber); LOG.debug("Creating part upload request for {} #{} size {}", uploadId, partNumber, size); + long partCountLimit = longOption(conf, + UPLOAD_PART_COUNT_LIMIT, + DEFAULT_UPLOAD_PART_COUNT_LIMIT, + 1); + if (partCountLimit != DEFAULT_UPLOAD_PART_COUNT_LIMIT) { + LOG.warn("Configuration property {} shouldn't be overridden by client", + UPLOAD_PART_COUNT_LIMIT); + } + final String pathErrorMsg = "Number of parts in multipart upload exceeded." 
+ + " Current part count = %s, Part count limit = %s "; + if (partNumber > partCountLimit) { + throw new PathIOException(destKey, + String.format(pathErrorMsg, partNumber, partCountLimit)); + } UploadPartRequest request = new UploadPartRequest() .withBucketName(bucket) .withKey(destKey) @@ -539,8 +594,20 @@ public CompleteMultipartUploadResult commitUpload( */ public BulkOperationState initiateCommitOperation( Path path) throws IOException { + return initiateOperation(path, BulkOperationState.OperationType.Commit); + } + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @param operationType operation to initiate + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + public BulkOperationState initiateOperation(final Path path, + final BulkOperationState.OperationType operationType) throws IOException { return S3Guard.initiateBulkWrite(owner.getMetadataStore(), - BulkOperationState.OperationType.Commit, path); + operationType, path); } /** @@ -552,7 +619,8 @@ public BulkOperationState initiateCommitOperation( @Retries.RetryTranslated public UploadPartResult uploadPart(UploadPartRequest request) throws IOException { - return retry("upload part", + return retry("upload part #" + request.getPartNumber() + + " upload ID "+ request.getUploadId(), request.getKey(), true, () -> owner.uploadPart(request)); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java new file mode 100644 index 0000000000000..09b9cc924c6a4 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperations.java @@ -0,0 +1,341 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.MultipartUpload; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.SelectObjectContentRequest; +import com.amazonaws.services.s3.model.SelectObjectContentResult; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.amazonaws.services.s3.transfer.model.UploadResult; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.util.functional.CallableRaisingIOE; + +/** + * Operations to update the store. + * This is effectively a private internal API for classes used as part + * of the S3A implementation. + * New extension points SHOULD use this interface -provided there + * is no plan to backport to previous versions. In those situations, + * use `WriteOperationHelper` directly. + * @since Hadoop 3.3.0 + */ +public interface WriteOperations { + + /** + * Execute a function with retry processing. + * @param type of return value + * @param action action to execute (used in error messages) + * @param path path of work (used in error messages) + * @param idempotent does the operation have semantics + * which mean that it can be retried even if was already executed? + * @param operation operation to execute + * @return the result of the call + * @throws IOException any IOE raised, or translated exception + */ + T retry(String action, + String path, + boolean idempotent, + CallableRaisingIOE operation) + throws IOException; + + /** + * Create a {@link PutObjectRequest} request against the specific key. + * @param destKey destination key + * @param inputStream source data. + * @param length size, if known. Use -1 for not known + * @param headers optional map of custom headers. + * @return the request + */ + PutObjectRequest createPutObjectRequest(String destKey, + InputStream inputStream, + long length, + @Nullable Map headers); + + /** + * Create a {@link PutObjectRequest} request to upload a file. + * @param dest key to PUT to. + * @param sourceFile source file + * @return the request + */ + PutObjectRequest createPutObjectRequest(String dest, + File sourceFile); + + /** + * Callback on a successful write. + * @param length length of the write + */ + void writeSuccessful(long length); + + /** + * Callback on a write failure. + * @param ex Any exception raised which triggered the failure. + */ + void writeFailed(Exception ex); + + /** + * Create a new object metadata instance. + * Any standard metadata headers are added here, for example: + * encryption. + * @param length size, if known. Use -1 for not known + * @return a new metadata instance + */ + ObjectMetadata newObjectMetadata(long length); + + /** + * Start the multipart upload process. + * Retry policy: retrying, translated. 
+ * @param destKey destination of upload + * @return the upload result containing the ID + * @throws IOException IO problem + */ + @Retries.RetryTranslated + String initiateMultiPartUpload(String destKey) throws IOException; + + /** + * This completes a multipart upload to the destination key via + * {@code finalizeMultipartUpload()}. + * Retry policy: retrying, translated. + * Retries increment the {@code errorCount} counter. + * @param destKey destination + * @param uploadId multipart operation Id + * @param partETags list of partial uploads + * @param length length of the upload + * @param errorCount a counter incremented by 1 on every error; for + * use in statistics + * @return the result of the operation. + * @throws IOException if problems arose which could not be retried, or + * the retry count was exceeded + */ + @Retries.RetryTranslated + CompleteMultipartUploadResult completeMPUwithRetries( + String destKey, + String uploadId, + List partETags, + long length, + AtomicInteger errorCount) + throws IOException; + + /** + * Abort a multipart upload operation. + * @param destKey destination key of the upload + * @param uploadId multipart operation Id + * @param shouldRetry should failures trigger a retry? + * @param retrying callback invoked on every retry + * @throws IOException failure to abort + * @throws FileNotFoundException if the abort ID is unknown + */ + @Retries.RetryTranslated + void abortMultipartUpload(String destKey, String uploadId, + boolean shouldRetry, Invoker.Retried retrying) + throws IOException; + + /** + * Abort a multipart commit operation. + * @param upload upload to abort. + * @throws IOException on problems. + */ + @Retries.RetryTranslated + void abortMultipartUpload(MultipartUpload upload) + throws IOException; + + /** + * Abort multipart uploads under a path: limited to the first + * few hundred. + * @param prefix prefix for uploads to abort + * @return a count of aborts + * @throws IOException trouble; FileNotFoundExceptions are swallowed. + */ + @Retries.RetryTranslated + int abortMultipartUploadsUnderPath(String prefix) + throws IOException; + + /** + * Abort a multipart commit operation. + * @param destKey destination key of ongoing operation + * @param uploadId multipart operation Id + * @throws IOException on problems. + * @throws FileNotFoundException if the abort ID is unknown + */ + @Retries.RetryTranslated + void abortMultipartCommit(String destKey, String uploadId) + throws IOException; + + /** + * Create and initialize a part request of a multipart upload. + * Exactly one of: {@code uploadStream} or {@code sourceFile} + * must be specified. + * A subset of the file may be posted, by providing the starting point + * in {@code offset} and a length of block in {@code size} equal to + * or less than the remaining bytes. + * @param destKey destination key of ongoing operation + * @param uploadId ID of ongoing upload + * @param partNumber current part number of the upload + * @param size amount of data + * @param uploadStream source of data to upload + * @param sourceFile optional source file. + * @param offset offset in file to start reading. + * @return the request. + * @throws IllegalArgumentException if the parameters are invalid -including + * @throws PathIOException if the part number is out of range. + */ + UploadPartRequest newUploadPartRequest( + String destKey, + String uploadId, + int partNumber, + int size, + InputStream uploadStream, + File sourceFile, + Long offset) throws PathIOException; + + /** + * PUT an object directly (i.e. 
not via the transfer manager). + * Byte length is calculated from the file length, or, if there is no + * file, from the content length of the header. + * @param putObjectRequest the request + * @return the upload initiated + * @throws IOException on problems + */ + @Retries.RetryTranslated + PutObjectResult putObject(PutObjectRequest putObjectRequest) + throws IOException; + + /** + * PUT an object via the transfer manager. + * @param putObjectRequest the request + * @return the result of the operation + * @throws IOException on problems + */ + @Retries.RetryTranslated + UploadResult uploadObject(PutObjectRequest putObjectRequest) + throws IOException; + + /** + * Revert a commit by deleting the file. + * Relies on retry code in filesystem + * @throws IOException on problems + * @param destKey destination key + * @param operationState operational state for a bulk update + */ + @Retries.OnceTranslated + void revertCommit(String destKey, + @Nullable BulkOperationState operationState) throws IOException; + + /** + * This completes a multipart upload to the destination key via + * {@code finalizeMultipartUpload()}. + * Retry policy: retrying, translated. + * Retries increment the {@code errorCount} counter. + * @param destKey destination + * @param uploadId multipart operation Id + * @param partETags list of partial uploads + * @param length length of the upload + * @param operationState operational state for a bulk update + * @return the result of the operation. + * @throws IOException if problems arose which could not be retried, or + * the retry count was exceeded + */ + @Retries.RetryTranslated + CompleteMultipartUploadResult commitUpload( + String destKey, + String uploadId, + List partETags, + long length, + @Nullable BulkOperationState operationState) + throws IOException; + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + BulkOperationState initiateCommitOperation( + Path path) throws IOException; + + /** + * Initiate a commit operation through any metastore. + * @param path path under which the writes will all take place. + * @param operationType operation to initiate + * @return an possibly null operation state from the metastore. + * @throws IOException failure to instantiate. + */ + BulkOperationState initiateOperation(Path path, + BulkOperationState.OperationType operationType) throws IOException; + + /** + * Upload part of a multi-partition file. + * @param request request + * @return the result of the operation. + * @throws IOException on problems + */ + @Retries.RetryTranslated + UploadPartResult uploadPart(UploadPartRequest request) + throws IOException; + + /** + * Get the configuration of this instance; essentially the owning + * filesystem configuration. + * @return the configuration. + */ + Configuration getConf(); + + /** + * Create a S3 Select request for the destination path. + * This does not build the query. + * @param path pre-qualified path for query + * @return the request + */ + SelectObjectContentRequest newSelectRequest(Path path); + + /** + * Execute an S3 Select operation. + * On a failure, the request is only logged at debug to avoid the + * select exception being printed. + * @param source source for selection + * @param request Select request to issue. 
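As the interface javadoc above states, new extension points should depend on WriteOperations rather than on WriteOperationHelper directly. A toy illustration of that dependency inversion, with invented names and a deliberately tiny interface, showing how a consumer can be exercised against a stub:

    import java.io.IOException;

    // Toy illustration: extension code depends on a narrow write-operations contract,
    // so tests can supply a stub instead of a live S3 helper.
    interface DemoWriteOperations {
      String initiateMultiPartUpload(String destKey) throws IOException;
      void abortMultipartUpload(String destKey, String uploadId, boolean shouldRetry) throws IOException;
    }

    final class DemoUploader {
      private final DemoWriteOperations writeOperations;

      DemoUploader(DemoWriteOperations writeOperations) {
        this.writeOperations = writeOperations;
      }

      /** Start an upload, aborting it if the subsequent work fails. */
      String startOrAbort(String destKey) throws IOException {
        String uploadId = writeOperations.initiateMultiPartUpload(destKey);
        try {
          // ... upload parts here ...
          return uploadId;
        } catch (RuntimeException e) {
          writeOperations.abortMultipartUpload(destKey, uploadId, true);
          throw e;
        }
      }

      public static void main(String[] args) throws IOException {
        DemoWriteOperations stub = new DemoWriteOperations() {
          public String initiateMultiPartUpload(String destKey) { return "upload-1"; }
          public void abortMultipartUpload(String destKey, String uploadId, boolean shouldRetry) { }
        };
        System.out.println(new DemoUploader(stub).startOrAbort("bucket/key"));
      }
    }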
+ * @param action the action for use in exception creation + * @return response + * @throws IOException failure + */ + @Retries.RetryTranslated + SelectObjectContentResult select( + Path source, + SelectObjectContentRequest request, + String action) + throws IOException; +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AbstractSessionCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AbstractSessionCredentialsProvider.java index 42c7d818734fd..816f3e47daab8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AbstractSessionCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AbstractSessionCredentialsProvider.java @@ -25,7 +25,7 @@ import com.amazonaws.SdkBaseException; import com.amazonaws.auth.AWSCredentials; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java index afad1f8458994..c467702575464 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java @@ -32,8 +32,8 @@ import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider; import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialBinding.java index f1b8b48dd7fb0..e7f3b5132cdf6 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialBinding.java @@ -33,7 +33,7 @@ import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.services.securitytoken.AWSSecurityTokenService; import com.amazonaws.services.securitytoken.model.Credentials; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialProvider.java index 03e26e7d8c53b..b0a1272256eaa 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentialProvider.java @@ -28,7 +28,7 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.CredentialInitializationException; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.auth.MarshalledCredentialBinding.toAWSCredentials; /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentials.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentials.java index e9d6173c46d73..26567b199bb1f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentials.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/MarshalledCredentials.java @@ -29,7 +29,7 @@ import java.util.Objects; import java.util.Optional; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java index 24a5397a590b3..6676b49b6a45a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.util.JsonSerialization; -import static com.google.common.base.Preconditions.checkState; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java index d2e7eb1a6578d..7b632e2d90a7b 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java @@ -23,7 +23,7 @@ import java.util.Collections; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java index 82811e625ecdb..cb6848a2c026d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java @@ -30,7 +30,7 @@ import com.amazonaws.services.securitytoken.model.AssumeRoleRequest; import com.amazonaws.services.securitytoken.model.Credentials; import com.amazonaws.services.securitytoken.model.GetSessionTokenRequest; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDTService.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDTService.java index b1211545cbe4b..41dac8ad683be 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDTService.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDTService.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.net.URI; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.impl.StoreContext; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractS3ATokenIdentifier.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractS3ATokenIdentifier.java index c86134e03f21a..eb48a28a79add 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractS3ATokenIdentifier.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractS3ATokenIdentifier.java @@ -31,6 +31,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; +import org.apache.hadoop.util.Time; import static java.util.Objects.requireNonNull; @@ -140,6 +141,7 @@ protected AbstractS3ATokenIdentifier( final URI uri) { super(kind, owner, renewer, realUser); this.uri = requireNonNull(uri); + initializeIssueDate(); } /** @@ -164,6 +166,11 @@ protected AbstractS3ATokenIdentifier( */ protected AbstractS3ATokenIdentifier(final Text kind) { super(kind); + initializeIssueDate(); + } + + private void initializeIssueDate() { + setIssueDate(Time.now()); } public String getBucket() { diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/FullCredentialsTokenBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/FullCredentialsTokenBinding.java index d80e780521d5c..79553175e76f5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/FullCredentialsTokenBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/FullCredentialsTokenBinding.java @@ -22,6 +22,8 @@ import java.net.URI; import java.util.Optional; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.AWSCredentialProviderList; import org.apache.hadoop.fs.s3a.S3AUtils; @@ -73,7 +75,6 @@ public FullCredentialsTokenBinding() { @Override protected void serviceStart() throws Exception { super.serviceStart(); - loadAWSCredentials(); } /** @@ -116,6 +117,7 @@ private void loadAWSCredentials() throws IOException { @Override public AWSCredentialProviderList deployUnbonded() throws IOException { requireServiceStarted(); + loadAWSCredentials(); return new AWSCredentialProviderList( "Full Credentials Token Binding", new MarshalledCredentialProvider( @@ -142,7 +144,8 @@ public AbstractS3ATokenIdentifier createTokenIdentifier( final EncryptionSecrets encryptionSecrets, final Text renewer) throws IOException { requireServiceStarted(); - + Preconditions.checkNotNull( + awsCredentials, "No AWS credentials to use for a delegation token"); return new FullCredentialsTokenIdentifier(getCanonicalUri(), getOwnerText(), renewer, diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/RoleTokenBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/RoleTokenBinding.java index 3c860338559b6..8a9ee30f64e5c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/RoleTokenBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/RoleTokenBinding.java @@ -24,8 +24,8 @@ import java.util.concurrent.TimeUnit; import com.amazonaws.services.securitytoken.model.Credentials; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java index 5005436c8242a..05917fe9c63fe 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/S3ADelegationTokens.java @@ -26,7 +26,7 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,18 +34,21 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.AWSCredentialProviderList; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.auth.RoleModel; import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.statistics.DelegationTokenStatistics; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.util.DurationInfo; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkState; +import static org.apache.hadoop.fs.s3a.Statistic.DELEGATION_TOKENS_ISSUED; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState; import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DEFAULT_DELEGATION_TOKEN_BINDING; import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DELEGATION_TOKEN_BINDING; import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DURATION_LOG_AT_INFO; @@ -134,9 +137,9 @@ public class S3ADelegationTokens extends AbstractDTService { AWSPolicyProvider.AccessLevel.WRITE); /** - * Statistics for the owner FS. + * Statistics for the operations. 
*/ - private S3AInstrumentation.DelegationTokenStatistics stats; + private DelegationTokenStatistics stats; /** * Name of the token binding as extracted from token kind; used for @@ -427,8 +430,10 @@ public Token createDelegationToken( try(DurationInfo ignored = new DurationInfo(LOG, DURATION_LOG_AT_INFO, "Creating New Delegation Token", tokenBinding.getKind())) { - Token token - = tokenBinding.createDelegationToken(rolePolicy, encryptionSecrets, renewer); + Token token = trackDuration(stats, + DELEGATION_TOKENS_ISSUED.getSymbol(), () -> + tokenBinding.createDelegationToken(rolePolicy, + encryptionSecrets, renewer)); if (token != null) { token.setService(service); noteTokenCreated(token); @@ -447,6 +452,19 @@ private void noteTokenCreated(final Token token) { stats.tokenIssued(); } + /** + * Get a null/possibly empty list of extra delegation token issuers. + * These will be asked for tokens when + * {@link DelegationTokenIssuer#getAdditionalTokenIssuers()} recursively + * collects all DTs a filesystem can offer. + * @return a null or empty array. Default implementation: null + * @throws IOException failure + */ + public DelegationTokenIssuer[] getAdditionalTokenIssuers() + throws IOException { + return null; + } + /** * Get the AWS credential provider. * @return the DT credential provider diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java index 3e4903122f30b..87fbdf53ccb11 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java @@ -30,7 +30,7 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSSessionCredentials; import com.amazonaws.services.securitytoken.AWSSecurityTokenService; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java index e82fbda63dd0c..f73689979ec25 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitter.java @@ -20,20 +20,24 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.text.DateFormat; import java.util.ArrayList; import java.util.Date; import java.util.List; +import java.util.UUID; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import com.amazonaws.services.s3.model.MultipartUpload; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -43,9 +47,15 @@ import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.DurationInfo; @@ -54,9 +64,15 @@ import static org.apache.hadoop.fs.s3a.Constants.THREAD_POOL_SHUTDOWN_DELAY_SECONDS; import static org.apache.hadoop.fs.s3a.Invoker.ignoreIOExceptions; import static org.apache.hadoop.fs.s3a.S3AUtils.*; +import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_COMMIT_JOB; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtilsWithMR.*; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.E_NO_SPARK_UUID; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.FS_S3A_COMMITTER_UUID; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.FS_S3A_COMMITTER_UUID_SOURCE; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.SPARK_WRITE_UUID; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfInvocation; /** * Abstract base class for S3A committers; allows for any commonality @@ -84,12 +100,42 @@ * created by a few tasks, and the HTTP connection pool in the S3A * committer was large enough for more all the parallel POST requests. */ -public abstract class AbstractS3ACommitter extends PathOutputCommitter { +public abstract class AbstractS3ACommitter extends PathOutputCommitter + implements IOStatisticsSource { + private static final Logger LOG = LoggerFactory.getLogger(AbstractS3ACommitter.class); public static final String THREAD_PREFIX = "s3a-committer-pool-"; + /** + * Error string when task setup fails. + */ + @VisibleForTesting + public static final String E_SELF_GENERATED_JOB_UUID + = "has a self-generated job UUID"; + + /** + * Unique ID for a Job. + * In MapReduce Jobs the YARN JobID suffices. + * On Spark this only be the YARN JobID + * it is known to be creating strongly unique IDs + * (i.e. SPARK-33402 is on the branch). + */ + private final String uuid; + + /** + * Source of the {@link #uuid} value. + */ + private final JobUUIDSource uuidSource; + + /** + * Has this instance been used for job setup? + * If so then it is safe for a locally generated + * UUID to be used for task setup. + */ + private boolean jobSetup; + /** * Thread pool for task execution. */ @@ -127,6 +173,8 @@ public abstract class AbstractS3ACommitter extends PathOutputCommitter { /** Should a job marker be created? 
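The trackDuration(...) call in S3ADelegationTokens and the trackDurationOfInvocation import above both wrap an operation so that its elapsed time feeds a duration statistic such as DELEGATION_TOKENS_ISSUED or COMMITTER_COMMIT_JOB. A simplified stand-in for that pattern (not the IOStatisticsBinding implementation, just the timing shape):

    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    // Simplified "track duration of a callable" pattern; the real implementation
    // lives in IOStatisticsBinding and feeds IOStatistics stores.
    final class DurationDemo {
      private static final Map<String, AtomicLong> TOTAL_NANOS = new ConcurrentHashMap<>();

      @FunctionalInterface
      interface IOCall<T> {
        T apply() throws IOException;
      }

      /** Run the callable, adding its elapsed time to the named statistic. */
      static <T> T trackDuration(String statistic, IOCall<T> call) throws IOException {
        long start = System.nanoTime();
        try {
          return call.apply();
        } finally {
          TOTAL_NANOS.computeIfAbsent(statistic, k -> new AtomicLong())
              .addAndGet(System.nanoTime() - start);
        }
      }

      public static void main(String[] args) throws IOException {
        String token = trackDuration("delegation_tokens_issued", () -> "token-bytes");
        System.out.println(token + " took "
            + TOTAL_NANOS.get("delegation_tokens_issued") + "ns");
      }
    }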
*/ private final boolean createJobMarker; + private final CommitterStatistics committerStatistics; + /** * Create a committer. * This constructor binds the destination directory and configuration, but @@ -146,14 +194,21 @@ protected AbstractS3ACommitter( this.jobContext = context; this.role = "Task committer " + context.getTaskAttemptID(); setConf(context.getConfiguration()); + Pair id = buildJobUUID( + conf, context.getJobID()); + this.uuid = id.getLeft(); + this.uuidSource = id.getRight(); + LOG.info("Job UUID {} source {}", getUUID(), getUUIDSource().getText()); initOutput(outputPath); LOG.debug("{} instantiated for job \"{}\" ID {} with destination {}", role, jobName(context), jobIdString(context), outputPath); S3AFileSystem fs = getDestS3AFS(); - createJobMarker = context.getConfiguration().getBoolean( + this.createJobMarker = context.getConfiguration().getBoolean( CREATE_SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, DEFAULT_CREATE_SUCCESSFUL_JOB_DIR_MARKER); - commitOperations = new CommitOperations(fs); + // the statistics are shared between this committer and its operations. + this.committerStatistics = fs.newCommitterStatistics(); + this.commitOperations = new CommitOperations(fs, committerStatistics); } /** @@ -201,7 +256,7 @@ protected final void setOutputPath(Path outputPath) { * @return the working path. */ @Override - public Path getWorkPath() { + public final Path getWorkPath() { return workPath; } @@ -209,16 +264,16 @@ public Path getWorkPath() { * Set the work path for this committer. * @param workPath the work path to use. */ - protected void setWorkPath(Path workPath) { + protected final void setWorkPath(Path workPath) { LOG.debug("Setting work path to {}", workPath); this.workPath = workPath; } - public Configuration getConf() { + public final Configuration getConf() { return conf; } - protected void setConf(Configuration conf) { + protected final void setConf(Configuration conf) { this.conf = conf; } @@ -307,6 +362,24 @@ public Path getTaskAttemptPath(TaskAttemptContext context) { */ public abstract String getName(); + /** + * The Job UUID, as passed in or generated. + * @return the UUID for the job. + */ + @VisibleForTesting + public final String getUUID() { + return uuid; + } + + /** + * Source of the UUID. + * @return how the job UUID was retrieved/generated. + */ + @VisibleForTesting + public final JobUUIDSource getUUIDSource() { + return uuidSource; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder( @@ -315,6 +388,8 @@ public String toString() { sb.append(", name=").append(getName()); sb.append(", outputPath=").append(getOutputPath()); sb.append(", workPath=").append(workPath); + sb.append(", uuid='").append(getUUID()).append('\''); + sb.append(", uuid source=").append(getUUIDSource()); sb.append('}'); return sb.toString(); } @@ -373,7 +448,12 @@ protected void maybeCreateSuccessMarkerFromCommits(JobContext context, // The list of committed objects in pending is size limited in // ActiveCommit.uploadCommitted. filenames.addAll(pending.committedObjects); - maybeCreateSuccessMarker(context, filenames); + // load in all the pending statistics + IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot( + pending.getIOStatistics()); + snapshot.aggregate(getIOStatistics()); + + maybeCreateSuccessMarker(context, filenames, snapshot); } /** @@ -384,21 +464,26 @@ protected void maybeCreateSuccessMarkerFromCommits(JobContext context, * PUT up a the contents of a {@link SuccessData} file. 
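The constructor above obtains the job UUID and its source from buildJobUUID(conf, jobId); that method body is not part of this hunk, so the sketch below only illustrates one plausible resolution order: an explicit committer option, then the Spark write UUID, then a locally generated fallback. The property names used here are assumptions made for the example, not confirmed constants:

    import java.util.Map;
    import java.util.UUID;

    // Hedged sketch of job-UUID resolution; the real precedence is implemented in
    // buildJobUUID(), which is not shown here. Property names are assumptions.
    final class JobUuidDemo {

      enum Source { CommitterOption, SparkWriteUuid, GeneratedLocally }

      static final class Resolved {
        final String uuid;
        final Source source;
        Resolved(String uuid, Source source) { this.uuid = uuid; this.source = source; }
      }

      static Resolved resolve(Map<String, String> conf) {
        String uuid = conf.get("fs.s3a.committer.uuid");          // assumed option name
        if (uuid != null && !uuid.isEmpty()) {
          return new Resolved(uuid, Source.CommitterOption);
        }
        uuid = conf.get("spark.sql.sources.writeJobUUID");        // assumed Spark option name
        if (uuid != null && !uuid.isEmpty()) {
          return new Resolved(uuid, Source.SparkWriteUuid);
        }
        // Locally generated IDs are only safe when the same committer also ran job setup.
        return new Resolved(UUID.randomUUID().toString(), Source.GeneratedLocally);
      }

      public static void main(String[] args) {
        Resolved r = resolve(Map.of("spark.sql.sources.writeJobUUID", "1234-abcd"));
        System.out.println(r.uuid + " from " + r.source);
      }
    }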
* @param context job context * @param filenames list of filenames. + * @param ioStatistics any IO Statistics to include * @throws IOException IO failure */ protected void maybeCreateSuccessMarker(JobContext context, - List filenames) + List filenames, + final IOStatisticsSnapshot ioStatistics) throws IOException { if (createJobMarker) { // create a success data structure and then save it SuccessData successData = new SuccessData(); successData.setCommitter(getName()); + successData.setJobId(uuid); + successData.setJobIdSource(uuidSource.getText()); successData.setDescription(getRole()); successData.setHostname(NetUtils.getLocalHostname()); Date now = new Date(); successData.setTimestamp(now.getTime()); successData.setDate(now.toString()); successData.setFilenames(filenames); + successData.getIOStatistics().aggregate(ioStatistics); commitOperations.createSuccessMarker(getOutputPath(), successData, true); } } @@ -410,26 +495,60 @@ protected void maybeCreateSuccessMarker(JobContext context, * be deleted; creating it now ensures there is something at the end * while the job is in progress -and if nothing is created, that * it is still there. + *

    + * The option {@link InternalCommitterConstants#FS_S3A_COMMITTER_UUID} + * is set to the job UUID; if generated locally + * {@link InternalCommitterConstants#SPARK_WRITE_UUID} is also patched. + * The field {@link #jobSetup} is set to true to note that + * this specific committer instance was used to set up a job. + *
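As a hedged illustration of the configuration patching described above (not part of the patch itself): once setupJob() has run, task-side code can read the propagated IDs back from the job configuration. The probe class and the use of a TaskAttemptContext are assumptions for the example; the option names are the constants added by this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    /** Illustrative only: reads back what setupJob() patched into the conf. */
    final class CommitterUuidProbe {
      static String describe(TaskAttemptContext taskContext) {
        Configuration c = taskContext.getConfiguration();
        return c.get("fs.s3a.committer.uuid", "(unset)")                    // FS_S3A_COMMITTER_UUID
            + " from " + c.get("fs.s3a.committer.uuid.source", "(unset)");  // FS_S3A_COMMITTER_UUID_SOURCE
      }
    }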

    * @param context context * @throws IOException IO failure */ @Override public void setupJob(JobContext context) throws IOException { - try (DurationInfo d = new DurationInfo(LOG, "preparing destination")) { + try (DurationInfo d = new DurationInfo(LOG, + "Job %s setting up", getUUID())) { + // record that the job has been set up + jobSetup = true; + // patch job conf with the job UUID. + Configuration c = context.getConfiguration(); + c.set(FS_S3A_COMMITTER_UUID, getUUID()); + c.set(FS_S3A_COMMITTER_UUID_SOURCE, getUUIDSource().getText()); + Path dest = getOutputPath(); if (createJobMarker){ - commitOperations.deleteSuccessMarker(getOutputPath()); + commitOperations.deleteSuccessMarker(dest); } - getDestFS().mkdirs(getOutputPath()); + getDestFS().mkdirs(dest); + // do a scan for surplus markers + warnOnActiveUploads(dest); } } + /** + * Task setup. Fails if the the UUID was generated locally, and + * the same committer wasn't used for job setup. + * {@inheritDoc} + * @throws PathCommitException if the task UUID options are unsatisfied. + */ @Override public void setupTask(TaskAttemptContext context) throws IOException { + TaskAttemptID attemptID = context.getTaskAttemptID(); try (DurationInfo d = new DurationInfo(LOG, "Setup Task %s", - context.getTaskAttemptID())) { + attemptID)) { + // reject attempts to set up the task where the output won't be + // picked up + if (!jobSetup + && getUUIDSource() == JobUUIDSource.GeneratedLocally) { + // on anything other than a test run, the context must not have been + // generated locally. + throw new PathCommitException(getOutputPath().toString(), + "Task attempt " + attemptID + + " " + E_SELF_GENERATED_JOB_UUID); + } Path taskAttemptPath = getTaskAttemptPath(context); - FileSystem fs = getTaskAttemptFilesystem(context); + FileSystem fs = taskAttemptPath.getFileSystem(getConf()); fs.mkdirs(taskAttemptPath); } } @@ -472,13 +591,13 @@ protected void commitPendingUploads( Tasks.foreach(pending.getSourceFiles()) .stopOnFailure() .suppressExceptions(false) - .executeWith(buildThreadPool(context)) - .abortWith(path -> - loadAndAbort(commitContext, pending, path, true, false)) - .revertWith(path -> - loadAndRevert(commitContext, pending, path)) - .run(path -> - loadAndCommit(commitContext, pending, path)); + .executeWith(buildSubmitter(context)) + .abortWith(status -> + loadAndAbort(commitContext, pending, status, true, false)) + .revertWith(status -> + loadAndRevert(commitContext, pending, status)) + .run(status -> + loadAndCommit(commitContext, pending, status)); } } @@ -502,8 +621,8 @@ protected void precommitCheckPendingFiles( Tasks.foreach(pending.getSourceFiles()) .stopOnFailure() .suppressExceptions(false) - .executeWith(buildThreadPool(context)) - .run(path -> PendingSet.load(sourceFS, path)); + .executeWith(buildSubmitter(context)) + .run(status -> PendingSet.load(sourceFS, status)); } } @@ -511,21 +630,30 @@ protected void precommitCheckPendingFiles( * Load a pendingset file and commit all of its contents. 
* @param commitContext context to commit through * @param activeCommit commit state - * @param path path to load + * @param status file to load * @throws IOException failure */ private void loadAndCommit( final CommitOperations.CommitContext commitContext, final ActiveCommit activeCommit, - final Path path) throws IOException { + final FileStatus status) throws IOException { + final Path path = status.getPath(); try (DurationInfo ignored = - new DurationInfo(LOG, false, "Committing %s", path)) { - PendingSet pendingSet = PendingSet.load(activeCommit.getSourceFS(), path); + new DurationInfo(LOG, + "Loading and committing files in pendingset %s", path)) { + PendingSet pendingSet = PendingSet.load(activeCommit.getSourceFS(), + status); + String jobId = pendingSet.getJobId(); + if (!StringUtils.isEmpty(jobId) && !getUUID().equals(jobId)) { + throw new PathCommitException(path, + String.format("Mismatch in Job ID (%s) and commit job ID (%s)", + getUUID(), jobId)); + } Tasks.foreach(pendingSet.getCommits()) .stopOnFailure() .suppressExceptions(false) - .executeWith(singleCommitThreadPool()) + .executeWith(singleThreadSubmitter()) .onFailure((commit, exception) -> commitContext.abortSingleCommit(commit)) .abortWith(commitContext::abortSingleCommit) @@ -535,6 +663,7 @@ private void loadAndCommit( activeCommit.uploadCommitted( commit.getDestinationKey(), commit.getLength()); }); + activeCommit.pendingsetCommitted(pendingSet.getIOStatistics()); } } @@ -542,17 +671,19 @@ private void loadAndCommit( * Load a pendingset file and revert all of its contents. * @param commitContext context to commit through * @param activeCommit commit state - * @param path path to load + * @param status status of file to load * @throws IOException failure */ private void loadAndRevert( final CommitOperations.CommitContext commitContext, final ActiveCommit activeCommit, - final Path path) throws IOException { + final FileStatus status) throws IOException { + final Path path = status.getPath(); try (DurationInfo ignored = new DurationInfo(LOG, false, "Committing %s", path)) { - PendingSet pendingSet = PendingSet.load(activeCommit.getSourceFS(), path); + PendingSet pendingSet = PendingSet.load(activeCommit.getSourceFS(), + status); Tasks.foreach(pendingSet.getCommits()) .suppressExceptions(true) .run(commitContext::revertCommit); @@ -563,24 +694,25 @@ private void loadAndRevert( * Load a pendingset file and abort all of its contents. * @param commitContext context to commit through * @param activeCommit commit state - * @param path path to load + * @param status status of file to load * @param deleteRemoteFiles should remote files be deleted? 
* @throws IOException failure */ private void loadAndAbort( final CommitOperations.CommitContext commitContext, final ActiveCommit activeCommit, - final Path path, + final FileStatus status, final boolean suppressExceptions, final boolean deleteRemoteFiles) throws IOException { + final Path path = status.getPath(); try (DurationInfo ignored = new DurationInfo(LOG, false, "Aborting %s", path)) { PendingSet pendingSet = PendingSet.load(activeCommit.getSourceFS(), - path); + status); FileSystem fs = getDestFS(); Tasks.foreach(pendingSet.getCommits()) - .executeWith(singleCommitThreadPool()) + .executeWith(singleThreadSubmitter()) .suppressExceptions(suppressExceptions) .run(commit -> { try { @@ -616,8 +748,9 @@ protected CommitOperations.CommitContext initiateCommitOperation() protected void commitJobInternal(JobContext context, ActiveCommit pending) throws IOException { - - commitPendingUploads(context, pending); + trackDurationOfInvocation(committerStatistics, + COMMITTER_COMMIT_JOB.getSymbol(), + () -> commitPendingUploads(context, pending)); } @Override @@ -658,6 +791,13 @@ protected void abortJobInternal(JobContext context, */ protected void abortPendingUploadsInCleanup( boolean suppressExceptions) throws IOException { + // return early if aborting is disabled. + if (!shouldAbortUploadsInCleanup()) { + LOG.debug("Not cleanup up pending uploads to {} as {} is false ", + getOutputPath(), + FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS); + return; + } Path dest = getOutputPath(); try (DurationInfo ignored = new DurationInfo(LOG, "Aborting all pending commits under %s", @@ -673,14 +813,27 @@ protected void abortPendingUploadsInCleanup( maybeIgnore(suppressExceptions, "aborting pending uploads", e); return; } - Tasks.foreach(pending) - .executeWith(buildThreadPool(getJobContext())) - .suppressExceptions(suppressExceptions) - .run(u -> commitContext.abortMultipartCommit( - u.getKey(), u.getUploadId())); + if (!pending.isEmpty()) { + LOG.warn("{} pending uploads were found -aborting", pending.size()); + LOG.warn("If other tasks/jobs are writing to {}," + + "this action may cause them to fail", dest); + Tasks.foreach(pending) + .executeWith(buildSubmitter(getJobContext())) + .suppressExceptions(suppressExceptions) + .run(u -> commitContext.abortMultipartCommit( + u.getKey(), u.getUploadId())); + } else { + LOG.info("No pending uploads were found"); + } } } + private boolean shouldAbortUploadsInCleanup() { + return getConf() + .getBoolean(FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS, + DEFAULT_FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS); + } + /** * Subclass-specific pre-Job-commit actions. * The staging committers all load the pending files to verify that @@ -838,44 +991,116 @@ protected String getRole() { } /** - * Returns an {@link ExecutorService} for parallel tasks. The number of + * Returns an {@link Tasks.Submitter} for parallel tasks. The number of * threads in the thread-pool is set by fs.s3a.committer.threads. * If num-threads is 0, this will return null; + * this is used in Tasks as a cue + * to switch to single-threaded execution. * * @param context the JobContext for this commit - * @return an {@link ExecutorService} or null for the number of threads + * @return a submitter or null */ - protected final synchronized ExecutorService buildThreadPool( + protected Tasks.Submitter buildSubmitter( JobContext context) { + if (getThreadCount(context) > 0) { + return new PoolSubmitter(context); + } else { + return null; + } + } + /** + * Returns an {@link ExecutorService} for parallel tasks. 
The number of + * threads in the thread-pool is set by fs.s3a.committer.threads. + * If num-threads is 0, this will raise an exception. + * + * @param context the JobContext for this commit + * @param numThreads threads + * @return an {@link ExecutorService} for the number of threads + */ + private synchronized ExecutorService buildThreadPool( + JobContext context, int numThreads) { + Preconditions.checkArgument(numThreads > 0, + "Cannot create a thread pool with no threads"); if (threadPool == null) { - int numThreads = context.getConfiguration().getInt( - FS_S3A_COMMITTER_THREADS, - DEFAULT_COMMITTER_THREADS); LOG.debug("{}: creating thread pool of size {}", getRole(), numThreads); - if (numThreads > 0) { - threadPool = HadoopExecutors.newFixedThreadPool(numThreads, - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat(THREAD_PREFIX + context.getJobID() + "-%d") - .build()); - } else { - return null; - } + threadPool = HadoopExecutors.newFixedThreadPool(numThreads, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat(THREAD_PREFIX + context.getJobID() + "-%d") + .build()); } return threadPool; } + /** + * Get the thread count for this job's commit operations. + * @param context the JobContext for this commit + * @return a possibly zero thread count. + */ + private int getThreadCount(final JobContext context) { + return context.getConfiguration().getInt( + FS_S3A_COMMITTER_THREADS, + DEFAULT_COMMITTER_THREADS); + } + + /** + * Submit a runnable. + * This will demand-create the thread pool if needed. + *

    + * This is synchronized to ensure the thread pool is always valid when + * work is synchronized. See HADOOP-16798. + * @param context the JobContext for this commit + * @param task task to execute + * @return the future of the submitted task. + */ + private synchronized Future submitRunnable( + final JobContext context, + final Runnable task) { + return buildThreadPool(context, getThreadCount(context)).submit(task); + } + + /** + * The real task submitter, which hands off the work to + * the current thread pool. + */ + private final class PoolSubmitter implements Tasks.Submitter { + + private final JobContext context; + + private final int numThreads; + + private PoolSubmitter(final JobContext context) { + this.numThreads = getThreadCount(context); + Preconditions.checkArgument(numThreads > 0, + "Cannot create a thread pool with no threads"); + this.context = context; + } + + @Override + public Future submit(final Runnable task) { + return submitRunnable(context, task); + } + + } + /** * Destroy any thread pools; wait for that to finish, * but don't overreact if it doesn't finish in time. */ - protected synchronized void destroyThreadPool() { - if (threadPool != null) { + protected void destroyThreadPool() { + ExecutorService pool; + // reset the thread pool in a sync block, then shut it down + // afterwards. This allows for other threads to create a + // new thread pool on demand. + synchronized(this) { + pool = this.threadPool; + threadPool = null; + } + if (pool != null) { LOG.debug("Destroying thread pool"); - HadoopExecutors.shutdown(threadPool, LOG, + HadoopExecutors.shutdown(pool, LOG, THREAD_POOL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS); - threadPool = null; } } @@ -884,11 +1109,9 @@ protected synchronized void destroyThreadPool() { * within the commit of all uploads of a single task. * This is currently null; it is here to allow the Tasks class to * provide the logic for execute/revert. - * Why not use the existing thread pool? Too much fear of deadlocking, - * and tasks are being committed in parallel anyway. * @return null. always. */ - protected final synchronized ExecutorService singleCommitThreadPool() { + protected final synchronized Tasks.Submitter singleThreadSubmitter() { return null; } @@ -932,7 +1155,7 @@ protected void abortPendingUploads(JobContext context, CommitOperations.CommitContext commitContext = initiateCommitOperation()) { Tasks.foreach(pending) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .suppressExceptions(suppressExceptions) .run(commitContext::abortSingleCommit); } @@ -961,7 +1184,7 @@ protected void abortPendingUploads( CommitOperations.CommitContext commitContext = initiateCommitOperation()) { Tasks.foreach(pending.getSourceFiles()) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .suppressExceptions(suppressExceptions) .run(path -> loadAndAbort(commitContext, @@ -973,6 +1196,171 @@ protected void abortPendingUploads( } } + @Override + public IOStatistics getIOStatistics() { + return committerStatistics.getIOStatistics(); + } + + /** + * Scan for active uploads and list them along with a warning message. + * Errors are ignored. + * @param path output path of job. 
+ */ + protected void warnOnActiveUploads(final Path path) { + List pending; + try { + pending = getCommitOperations() + .listPendingUploadsUnderPath(path); + } catch (IOException e) { + LOG.debug("Failed to list uploads under {}", + path, e); + return; + } + if (!pending.isEmpty()) { + // log a warning + LOG.warn("{} active upload(s) in progress under {}", + pending.size(), + path); + LOG.warn("Either jobs are running concurrently" + + " or failed jobs are not being cleaned up"); + // and the paths + timestamps + DateFormat df = DateFormat.getDateTimeInstance(); + pending.forEach(u -> + LOG.info("[{}] {}", + df.format(u.getInitiated()), + u.getKey())); + if (shouldAbortUploadsInCleanup()) { + LOG.warn("This committer will abort these uploads in job cleanup"); + } + } + } + + /** + * Build the job UUID. + * + *

    + * In MapReduce jobs, the application ID is issued by YARN, and + * unique across all jobs. + *

    + *

+ * Spark will use a fake app ID based on the current time. + * This can lead to collisions on busy clusters unless + * the specific Spark release has SPARK-33402 applied; that fix + * appends a random long value to the timestamp, making the ID + * unique enough that the risk of collision is almost + * nonexistent. + *

    + *

+ * The order of selection of a uuid is:
+ * 1. Value of {@link InternalCommitterConstants#FS_S3A_COMMITTER_UUID}.
+ * 2. Value of {@link InternalCommitterConstants#SPARK_WRITE_UUID}.
+ * 3. If enabled through {@link CommitConstants#FS_S3A_COMMITTER_GENERATE_UUID}: Self-generated uuid.
+ * 4. If {@link CommitConstants#FS_S3A_COMMITTER_REQUIRE_UUID} is not set: Application ID.
+ *
    + * The UUID bonding takes place during construction; + * the staging committers use it to set up their wrapped + * committer to a path in the cluster FS which is unique to the + * job. + *


+ * In {@link #setupJob(JobContext)} the job context's configuration + * will be patched with the UUID and its source, so that the UUID will + * be valid in all sequences where the job has been set up with the + * configuration passed in. + *

    + * If the option {@link CommitConstants#FS_S3A_COMMITTER_REQUIRE_UUID} + * is set, then an external UUID MUST be passed in. + * This can be used to verify that the spark engine is reliably setting + * unique IDs for staging. + *
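A minimal sketch of how the resolution order above plays out, assuming only the public buildJobUUID method and JobUUIDSource enum introduced in this change; the UUID value and job ID string are made up for the example.

    import java.util.UUID;
    import org.apache.commons.lang3.tuple.Pair;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter;
    import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter.JobUUIDSource;
    import org.apache.hadoop.mapreduce.JobID;

    final class JobUuidResolutionSketch {
      static void demo() throws Exception {
        Configuration conf = new Configuration(false);
        // what a Spark release with SPARK-33230 applied would pass down:
        conf.set("spark.sql.sources.writeJobUUID", UUID.randomUUID().toString());
        Pair<String, JobUUIDSource> id = AbstractS3ACommitter.buildJobUUID(
            conf, JobID.forName("job_1600000000000_0001"));
        // id.getLeft() is the Spark UUID; id.getRight() == JobUUIDSource.SparkWriteUUID
      }
    }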

    + * @param conf job/task configuration + * @param jobId job ID from YARN or spark. + * @return Job UUID and source of it. + * @throws PathCommitException no UUID was found and it was required + */ + public static Pair + buildJobUUID(Configuration conf, JobID jobId) + throws PathCommitException { + + String jobUUID = conf.getTrimmed(FS_S3A_COMMITTER_UUID, ""); + + if (!jobUUID.isEmpty()) { + return Pair.of(jobUUID, JobUUIDSource.CommitterUUIDProperty); + } + // there is no job UUID. + // look for one from spark + jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, ""); + if (!jobUUID.isEmpty()) { + return Pair.of(jobUUID, JobUUIDSource.SparkWriteUUID); + } + + // there is no UUID configuration in the job/task config + + // Check the job hasn't declared a requirement for the UUID. + // This allows or fail-fast validation of Spark behavior. + if (conf.getBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, + DEFAULT_S3A_COMMITTER_REQUIRE_UUID)) { + throw new PathCommitException("", E_NO_SPARK_UUID); + } + + // see if the job can generate a random UUI` + if (conf.getBoolean(FS_S3A_COMMITTER_GENERATE_UUID, + DEFAULT_S3A_COMMITTER_GENERATE_UUID)) { + // generate a random UUID. This is OK for a job, for a task + // it means that the data may not get picked up. + String newId = UUID.randomUUID().toString(); + LOG.warn("No job ID in configuration; generating a random ID: {}", + newId); + return Pair.of(newId, JobUUIDSource.GeneratedLocally); + } + // if no other option was supplied, return the job ID. + // This is exactly what MR jobs expect, but is not what + // Spark jobs can do as there is a risk of jobID collision. + return Pair.of(jobId.toString(), JobUUIDSource.JobID); + } + + /** + * Enumeration of Job UUID source. + */ + public enum JobUUIDSource { + SparkWriteUUID(SPARK_WRITE_UUID), + CommitterUUIDProperty(FS_S3A_COMMITTER_UUID), + JobID("JobID"), + GeneratedLocally("Generated Locally"); + + private final String text; + + JobUUIDSource(final String text) { + this.text = text; + } + + /** + * Source for messages. + * @return text + */ + public String getText() { + return text; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "JobUUIDSource{"); + sb.append("text='").append(text).append('\''); + sb.append('}'); + return sb.toString(); + } + } + /** * State of the active commit operation. * @@ -1000,7 +1388,7 @@ public static class ActiveCommit { = new ActiveCommit(null, new ArrayList<>()); /** All pendingset files to iterate through. */ - private final List sourceFiles; + private final List sourceFiles; /** * Filesystem for the source files. @@ -1023,6 +1411,13 @@ public static class ActiveCommit { */ private long committedBytes; + /** + * Aggregate statistics of all supplied by + * committed uploads. + */ + private final IOStatisticsSnapshot ioStatistics = + new IOStatisticsSnapshot(); + /** * Construct from a source FS and list of files. 
* @param sourceFS filesystem containing the list of pending files @@ -1030,8 +1425,8 @@ public static class ActiveCommit { */ public ActiveCommit( final FileSystem sourceFS, - final List sourceFiles) { - this.sourceFiles = sourceFiles; + final List sourceFiles) { + this.sourceFiles = (List) sourceFiles; this.sourceFS = sourceFS; } @@ -1044,10 +1439,7 @@ public ActiveCommit( public static ActiveCommit fromStatusList( final FileSystem pendingFS, final List statuses) { - return new ActiveCommit(pendingFS, - statuses.stream() - .map(FileStatus::getPath) - .collect(Collectors.toList())); + return new ActiveCommit(pendingFS, statuses); } /** @@ -1058,7 +1450,7 @@ public static ActiveCommit empty() { return EMPTY; } - public List getSourceFiles() { + public List getSourceFiles() { return sourceFiles; } @@ -1074,7 +1466,8 @@ public FileSystem getSourceFS() { * @param key key of the committed object. * @param size size in bytes. */ - public synchronized void uploadCommitted(String key, long size) { + public synchronized void uploadCommitted(String key, + long size) { if (committedObjects.size() < SUCCESS_MARKER_FILE_LIMIT) { committedObjects.add( key.startsWith("/") ? key : ("/" + key)); @@ -1083,6 +1476,19 @@ public synchronized void uploadCommitted(String key, long size) { committedBytes += size; } + /** + * Callback when a pendingset has been committed, + * including any source statistics. + * @param sourceStatistics any source statistics + */ + public void pendingsetCommitted(final IOStatistics sourceStatistics) { + ioStatistics.aggregate(sourceStatistics); + } + + public IOStatisticsSnapshot getIOStatistics() { + return ioStatistics; + } + public synchronized List getCommittedObjects() { return committedObjects; } @@ -1103,8 +1509,8 @@ public boolean isEmpty() { return sourceFiles.isEmpty(); } - public void add(Path path) { - sourceFiles.add(path); + public void add(FileStatus status) { + sourceFiles.add(status); } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java index 3e28a5d2cf96f..bbc59f168f60d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import static org.apache.hadoop.fs.s3a.Constants.XA_HEADER_PREFIX; import static org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory.COMMITTER_FACTORY_SCHEME_PATTERN; /** @@ -107,9 +108,9 @@ private CommitConstants() { = "s3a:magic.committer"; /** - * Is the committer enabled by default? No. + * Is the committer enabled by default: {@value}. */ - public static final boolean DEFAULT_MAGIC_COMMITTER_ENABLED = false; + public static final boolean DEFAULT_MAGIC_COMMITTER_ENABLED = true; /** * This is the "Pending" directory of the {@code FileOutputCommitter}; @@ -240,20 +241,39 @@ private CommitConstants() { /** - * Should the staging committers abort all pending uploads to the destination - * directory? Default: true. - * - * Changing this is if more than one partitioned committer is + * Should committers abort all pending uploads to the destination + * directory? + *

    + * Deprecated: switch to {@link #FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS}. + */ + @Deprecated + public static final String FS_S3A_COMMITTER_STAGING_ABORT_PENDING_UPLOADS = + "fs.s3a.committer.staging.abort.pending.uploads"; + + /** + * Should committers abort all pending uploads to the destination + * directory? + *

    + * Value: {@value}. + *

+ * Change this if more than one committer is * writing to the same destination tree simultaneously; otherwise * the first job to complete will cancel all outstanding uploads from the - * others. However, it may lead to leaked outstanding uploads from failed - * tasks. If disabled, configure the bucket lifecycle to remove uploads * after a time period, and/or set up a workflow to explicitly delete * entries. Otherwise there is a risk that uncommitted uploads may run up * bills. */ - public static final String FS_S3A_COMMITTER_STAGING_ABORT_PENDING_UPLOADS = - "fs.s3a.committer.staging.abort.pending.uploads"; + public static final String FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS = + "fs.s3a.committer.abort.pending.uploads"; + + /** + * Default configuration value for + * {@link #FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS}. + * Value: {@value}. + */ + public static final boolean DEFAULT_FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS = + true; /** * The limit to the number of committed objects tracked during * @@ -261,4 +281,53 @@ private CommitConstants() { */ public static final int SUCCESS_MARKER_FILE_LIMIT = 100; + /** Extra Data key for task attempt in pendingset files. */ + public static final String TASK_ATTEMPT_ID = "task.attempt.id"; + + /** + * Require the spark UUID to be passed down: {@value}. + * This is to verify that SPARK-33230 has been applied to spark, and that + * {@link InternalCommitterConstants#SPARK_WRITE_UUID} is set. + *
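For the fail-fast check described above, a hedged sketch of the expected failure mode when the new option is set but no external UUID arrives; the demo class is illustrative, and the behaviour shown follows the buildJobUUID contract in this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter;
    import org.apache.hadoop.fs.s3a.commit.PathCommitException;
    import org.apache.hadoop.mapreduce.JobID;

    final class RequireUuidSketch {
      static void demo() {
        Configuration conf = new Configuration(false);
        conf.setBoolean("fs.s3a.committer.require.uuid", true);  // FS_S3A_COMMITTER_REQUIRE_UUID
        try {
          AbstractS3ACommitter.buildJobUUID(conf, JobID.forName("job_1600000000000_0002"));
        } catch (PathCommitException expected) {
          // neither spark.sql.sources.writeJobUUID nor fs.s3a.committer.uuid was set
        }
      }
    }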

    + * MUST ONLY BE SET WITH SPARK JOBS. + *

    + */ + public static final String FS_S3A_COMMITTER_REQUIRE_UUID = + "fs.s3a.committer.require.uuid"; + + /** + * Default value for {@link #FS_S3A_COMMITTER_REQUIRE_UUID}: {@value}. + */ + public static final boolean DEFAULT_S3A_COMMITTER_REQUIRE_UUID = + false; + + /** + * Generate a UUID in job setup rather than fall back to + * YARN Application attempt ID. + *

    + * MUST ONLY BE SET WITH SPARK JOBS. + *

    + */ + public static final String FS_S3A_COMMITTER_GENERATE_UUID = + "fs.s3a.committer.generate.uuid"; + + /** + * Default value for {@link #FS_S3A_COMMITTER_GENERATE_UUID}: {@value}. + */ + public static final boolean DEFAULT_S3A_COMMITTER_GENERATE_UUID = + false; + + /** + * Magic Marker header to declare final file length on magic uploads + * marker objects: {@value}. + */ + public static final String X_HEADER_MAGIC_MARKER = + "x-hadoop-s3a-magic-data-length"; + + /** + * XAttr name of magic marker, with "header." prefix: {@value}. + */ + public static final String XA_MAGIC_MARKER = XA_HEADER_PREFIX + + X_HEADER_MAGIC_MARKER; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java index 8592ad490184e..c09278fbdcf32 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -33,31 +34,42 @@ import com.amazonaws.services.s3.model.PartETag; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3AUtils; import org.apache.hadoop.fs.s3a.WriteOperationHelper; import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.commit.files.SuccessData; +import org.apache.hadoop.fs.s3a.impl.HeaderProcessing; +import org.apache.hadoop.fs.s3a.impl.InternalConstants; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.Progressable; +import static java.util.Objects.requireNonNull; import static org.apache.hadoop.fs.s3a.S3AUtils.*; +import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_MATERIALIZE_FILE; +import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_STAGE_FILE_UPLOAD; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration; /** * The implementation of the various actions a committer needs. @@ -69,7 +81,7 @@ * duplicate that work. 
* */ -public class CommitOperations { +public class CommitOperations implements IOStatisticsSource { private static final Logger LOG = LoggerFactory.getLogger( CommitOperations.class); @@ -79,7 +91,7 @@ public class CommitOperations { private final S3AFileSystem fs; /** Statistics. */ - private final S3AInstrumentation.CommitterStatistics statistics; + private final CommitterStatistics statistics; /** * Write operations for the destination fs. @@ -103,9 +115,18 @@ public class CommitOperations { * @param fs FS to bind to */ public CommitOperations(S3AFileSystem fs) { - Preconditions.checkArgument(fs != null, "null fs"); - this.fs = fs; - statistics = fs.newCommitterStatistics(); + this(requireNonNull(fs), fs.newCommitterStatistics()); + } + + /** + * Instantiate. + * @param fs FS to bind to + * @param committerStatistics committer statistics + */ + public CommitOperations(S3AFileSystem fs, + CommitterStatistics committerStatistics) { + this.fs = requireNonNull(fs); + statistics = requireNonNull(committerStatistics); writeOperations = fs.getWriteOperationHelper(); } @@ -126,10 +147,15 @@ public String toString() { } /** @return statistics. */ - protected S3AInstrumentation.CommitterStatistics getStatistics() { + protected CommitterStatistics getStatistics() { return statistics; } + @Override + public IOStatistics getIOStatistics() { + return statistics.getIOStatistics(); + } + /** * Commit the operation, throwing an exception on any failure. * @param commit commit to execute @@ -157,10 +183,15 @@ private MaybeIOE commit( LOG.debug("Committing single commit {}", commit); MaybeIOE outcome; String destKey = "unknown destination"; - try { + try (DurationInfo d = new DurationInfo(LOG, + "Committing file %s size %s", + commit.getDestinationKey(), + commit.getLength())) { + commit.validate(); destKey = commit.getDestinationKey(); - long l = innerCommit(commit, operationState); + long l = trackDuration(statistics, COMMITTER_MATERIALIZE_FILE.getSymbol(), + () -> innerCommit(commit, operationState)); LOG.debug("Successful commit of file length {}", l); outcome = MaybeIOE.NONE; statistics.commitCompleted(commit.getLength()); @@ -271,7 +302,7 @@ private void abortSingleCommit(SinglePendingCommit commit) ? 
(" defined in " + commit.getFilename()) : ""; String uploadId = commit.getUploadId(); - LOG.info("Aborting commit to object {}{}", destKey, origin); + LOG.info("Aborting commit ID {} to object {}{}", uploadId, destKey, origin); abortMultipartCommit(destKey, uploadId); } @@ -285,7 +316,8 @@ private void abortSingleCommit(SinglePendingCommit commit) */ private void abortMultipartCommit(String destKey, String uploadId) throws IOException { - try { + try (DurationInfo d = new DurationInfo(LOG, + "Aborting commit ID %s to path %s", uploadId, destKey)) { writeOperations.abortMultipartCommit(destKey, uploadId); } finally { statistics.commitAborted(); @@ -402,8 +434,6 @@ public void createSuccessMarker(Path outputPath, conf.getTrimmed(METADATASTORE_AUTHORITATIVE, "false")); successData.addDiagnostic(AUTHORITATIVE_PATH, conf.getTrimmed(AUTHORITATIVE_PATH, "")); - successData.addDiagnostic(MAGIC_COMMITTER_ENABLED, - conf.getTrimmed(MAGIC_COMMITTER_ENABLED, "false")); // now write Path markerPath = new Path(outputPath, _SUCCESS); @@ -442,7 +472,7 @@ public void revertCommit(SinglePendingCommit commit, * @return a pending upload entry * @throws IOException failure */ - public SinglePendingCommit uploadFileToPendingCommit(File localFile, + public SinglePendingCommit uploadFileToPendingCommit(File localFile, Path destPath, String partition, long uploadPartSize, @@ -459,8 +489,16 @@ public SinglePendingCommit uploadFileToPendingCommit(File localFile, String destKey = fs.pathToKey(destPath); String uploadId = null; + // flag to indicate to the finally clause that the operation + // failed. it is cleared as the last action in the try block. boolean threw = true; - try { + final DurationTracker tracker = statistics.trackDuration( + COMMITTER_STAGE_FILE_UPLOAD.getSymbol()); + try (DurationInfo d = new DurationInfo(LOG, + "Upload staged file from %s to %s", + localFile.getAbsolutePath(), + destPath)) { + statistics.commitCreated(); uploadId = writeOperations.initiateMultiPartUpload(destKey); long length = localFile.length(); @@ -481,12 +519,22 @@ public SinglePendingCommit uploadFileToPendingCommit(File localFile, if (numParts == 0) { numParts = 1; } + if (numParts > InternalConstants.DEFAULT_UPLOAD_PART_COUNT_LIMIT) { + // fail if the file is too big. + // it would be possible to be clever here and recalculate the part size, + // but this is not currently done. + throw new PathIOException(destPath.toString(), + String.format("File to upload (size %d)" + + " is too big to be uploaded in parts of size %d", + numParts, length)); + } List parts = new ArrayList<>((int) numParts); LOG.debug("File size is {}, number of parts to upload = {}", length, numParts); for (int partNumber = 1; partNumber <= numParts; partNumber += 1) { + progress.progress(); long size = Math.min(length - offset, uploadPartSize); UploadPartRequest part; part = writeOperations.newUploadPartRequest( @@ -505,18 +553,22 @@ public SinglePendingCommit uploadFileToPendingCommit(File localFile, commitData.bindCommitData(parts); statistics.commitUploaded(length); - progress.progress(); + // clear the threw flag. 
threw = false; return commitData; } finally { if (threw && uploadId != null) { - statistics.commitAborted(); try { abortMultipartCommit(destKey, uploadId); } catch (IOException e) { LOG.error("Failed to abort upload {} to {}", uploadId, destKey, e); } } + if (threw) { + tracker.failed(); + } + // close tracker and so report statistics of success/failure + tracker.close(); } } @@ -555,6 +607,29 @@ public CommitContext initiateCommitOperation(Path path) throws IOException { return new CommitContext(writeOperations.initiateCommitOperation(path)); } + /** + * Get the magic file length of a file. + * If the FS doesn't support the API, the attribute is missing or + * the parse to long fails, then Optional.empty() is returned. + * Static for some easier testability. + * @param fs filesystem + * @param path path + * @return either a length or None. + * @throws IOException on error + * */ + public static Optional extractMagicFileLength(FileSystem fs, Path path) + throws IOException { + byte[] bytes; + try { + bytes = fs.getXAttr(path, XA_MAGIC_MARKER); + } catch (UnsupportedOperationException e) { + // FS doesn't support xattr. + LOG.debug("Filesystem {} doesn't support XAttr API", fs); + return Optional.empty(); + } + return HeaderProcessing.extractXAttrLongValue(bytes); + } + /** * Commit context. * diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtilsWithMR.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtilsWithMR.java index c6c0da8309cc0..9e5ee860e85ff 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtilsWithMR.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitUtilsWithMR.java @@ -58,7 +58,7 @@ public static Path getMagicJobAttemptsPath(Path out) { /** * Get the Application Attempt ID for this job. * @param context the context to look in - * @return the Application Attempt ID for a given job. + * @return the Application Attempt ID for a given job, or 0 */ public static int getAppAttemptId(JobContext context) { return context.getConfiguration().getInt( @@ -67,33 +67,32 @@ public static int getAppAttemptId(JobContext context) { /** * Compute the "magic" path for a job attempt. - * @param appAttemptId the ID of the application attempt for this job. + * @param jobUUID unique Job ID. * @param dest the final output directory * @return the path to store job attempt data. */ - public static Path getMagicJobAttemptPath(int appAttemptId, Path dest) { + public static Path getMagicJobAttemptPath(String jobUUID, Path dest) { return new Path(getMagicJobAttemptsPath(dest), - formatAppAttemptDir(appAttemptId)); + formatAppAttemptDir(jobUUID)); } /** * Format the application attempt directory. - * @param attemptId attempt ID + * @param jobUUID unique Job ID. * @return the directory name for the application attempt */ - public static String formatAppAttemptDir(int attemptId) { - return String.format("app-attempt-%04d", attemptId); + public static String formatAppAttemptDir(String jobUUID) { + return String.format("job-%s", jobUUID); } /** * Compute the path where the output of magic task attempts are stored. - * @param context the context of the job with magic tasks. + * @param jobUUID unique Job ID. * @param dest destination of work * @return the path where the output of magic task attempts are stored. 
*/ - public static Path getMagicTaskAttemptsPath(JobContext context, Path dest) { - return new Path(getMagicJobAttemptPath( - getAppAttemptId(context), dest), "tasks"); + public static Path getMagicTaskAttemptsPath(String jobUUID, Path dest) { + return new Path(getMagicJobAttemptPath(jobUUID, dest), "tasks"); } /** @@ -102,48 +101,56 @@ public static Path getMagicTaskAttemptsPath(JobContext context, Path dest) { * This path is marked as a base path for relocations, so subdirectory * information is preserved. * @param context the context of the task attempt. + * @param jobUUID unique Job ID. * @param dest The output path to commit work into * @return the path where a task attempt should be stored. */ public static Path getMagicTaskAttemptPath(TaskAttemptContext context, + String jobUUID, Path dest) { - return new Path(getBaseMagicTaskAttemptPath(context, dest), BASE); + return new Path(getBaseMagicTaskAttemptPath(context, jobUUID, dest), + BASE); } /** * Get the base Magic attempt path, without any annotations to mark relative * references. * @param context task context. + * @param jobUUID unique Job ID. * @param dest The output path to commit work into * @return the path under which all attempts go */ public static Path getBaseMagicTaskAttemptPath(TaskAttemptContext context, + String jobUUID, Path dest) { - return new Path(getMagicTaskAttemptsPath(context, dest), + return new Path(getMagicTaskAttemptsPath(jobUUID, dest), String.valueOf(context.getTaskAttemptID())); } /** * Compute a path for temporary data associated with a job. * This data is not magic - * @param appAttemptId the ID of the application attempt for this job. + * @param jobUUID unique Job ID. * @param out output directory of job * @return the path to store temporary job attempt data. */ - public static Path getTempJobAttemptPath(int appAttemptId, Path out) { + public static Path getTempJobAttemptPath(String jobUUID, + Path out) { return new Path(new Path(out, TEMP_DATA), - formatAppAttemptDir(appAttemptId)); + formatAppAttemptDir(jobUUID)); } /** - * Compute the path where the output of a given job attempt will be placed. + * Compute the path where the output of a given task attempt will be placed. * @param context task context + * @param jobUUID unique Job ID. * @param out output directory of job * @return the path to store temporary job attempt data. */ public static Path getTempTaskAttemptPath(TaskAttemptContext context, - Path out) { - return new Path(getTempJobAttemptPath(getAppAttemptId(context), out), + final String jobUUID, Path out) { + return new Path( + getTempJobAttemptPath(jobUUID, out), String.valueOf(context.getTaskAttemptID())); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java index 2821fcea4908b..461c9a5e646c3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/InternalCommitterConstants.java @@ -46,8 +46,14 @@ private InternalCommitterConstants() { /** * A unique identifier to use for this work: {@value}. */ - public static final String FS_S3A_COMMITTER_STAGING_UUID = - "fs.s3a.committer.staging.uuid"; + public static final String FS_S3A_COMMITTER_UUID = + "fs.s3a.committer.uuid"; + + /** + * Where did the UUID come from? {@value}. 
+ */ + public static final String FS_S3A_COMMITTER_UUID_SOURCE = + "fs.s3a.committer.uuid.source"; /** * Directory committer factory: {@value}. @@ -97,4 +103,25 @@ private InternalCommitterConstants() { /** Error message for a path without a magic element in the list: {@value}. */ public static final String E_NO_MAGIC_PATH_ELEMENT = "No " + MAGIC + " element in path"; + + /** + * The UUID for jobs: {@value}. + * This was historically created in Spark 1.x's SQL queries, but "went away". + */ + public static final String SPARK_WRITE_UUID = + "spark.sql.sources.writeJobUUID"; + + /** + * Java temp dir: {@value}. + */ + public static final String JAVA_IO_TMPDIR = "java.io.tmpdir"; + + /** + * Incoming Job/task configuration didn't contain any option + * {@link #SPARK_WRITE_UUID}. + */ + public static final String E_NO_SPARK_UUID = + "Job/task context does not contain a unique ID in " + + SPARK_WRITE_UUID; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java index 7f9dadf06f320..41f95c8620692 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitIntegration.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.commit.magic.MagicCommitTracker; +import org.apache.hadoop.fs.s3a.impl.StoreContext; import static org.apache.hadoop.fs.s3a.commit.MagicCommitPaths.*; @@ -50,6 +51,8 @@ public class MagicCommitIntegration { private final S3AFileSystem owner; private final boolean magicCommitEnabled; + private final StoreContext storeContext; + /** * Instantiate. 
* @param owner owner class @@ -59,6 +62,7 @@ public MagicCommitIntegration(S3AFileSystem owner, boolean magicCommitEnabled) { this.owner = owner; this.magicCommitEnabled = magicCommitEnabled; + this.storeContext = owner.createStoreContext(); } /** @@ -94,10 +98,10 @@ public PutTracker createTracker(Path path, String key) { if (isMagicCommitPath(elements)) { final String destKey = keyOfFinalDestination(elements, key); String pendingsetPath = key + CommitConstants.PENDING_SUFFIX; - owner.getInstrumentation() - .incrementCounter(Statistic.COMMITTER_MAGIC_FILES_CREATED, 1); + storeContext.incrementStatistic( + Statistic.COMMITTER_MAGIC_FILES_CREATED); tracker = new MagicCommitTracker(path, - owner.getBucket(), + storeContext.getBucket(), key, destKey, pendingsetPath, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitPaths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitPaths.java index 745b5b2289b2b..19713909f50c4 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitPaths.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/MagicCommitPaths.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; -import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.BASE; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC; import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.E_NO_MAGIC_PATH_ELEMENT; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/PutTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/PutTracker.java index bbffef3ad221c..10440f77e7277 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/PutTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/PutTracker.java @@ -24,6 +24,7 @@ import com.amazonaws.services.s3.model.PartETag; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.statistics.IOStatistics; /** * Multipart put tracker. @@ -68,6 +69,7 @@ public boolean outputImmediatelyVisible() { * @param uploadId Upload ID * @param parts list of parts * @param bytesWritten bytes written + * @param iostatistics nullable IO statistics * @return true if the commit is to be initiated immediately. * False implies the output stream does not need to worry about * what happens. 
@@ -75,7 +77,8 @@ public boolean outputImmediatelyVisible() { */ public boolean aboutToComplete(String uploadId, List parts, - long bytesWritten) + long bytesWritten, + final IOStatistics iostatistics) throws IOException { return true; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java index b6b6b9707ebc5..c318e86605e0c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; @@ -76,7 +75,7 @@ public interface FailureTask { */ public static class Builder { private final Iterable items; - private ExecutorService service = null; + private Submitter service = null; private FailureTask onFailure = null; private boolean stopOnFailure = false; private boolean suppressExceptions = false; @@ -96,11 +95,11 @@ public static class Builder { /** * Declare executor service: if null, the tasks are executed in a single * thread. - * @param executorService service to schedule tasks with. + * @param submitter service to schedule tasks with. * @return this builder. */ - public Builder executeWith(ExecutorService executorService) { - this.service = executorService; + public Builder executeWith(Submitter submitter) { + this.service = submitter; return this; } @@ -407,4 +406,18 @@ private static void castAndThrow(Exception e) throws E { } throw (E) e; } + + /** + * Interface to whatever lets us submit tasks. + */ + public interface Submitter { + + /** + * Submit work. + * @param task task to execute + * @return the future of the submitted task. + */ + Future submit(Runnable task); + } + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PendingSet.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PendingSet.java index c0d7415fcb9e4..fd7341025667a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PendingSet.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PendingSet.java @@ -27,14 +27,18 @@ import java.util.Map; import java.util.Set; +import com.fasterxml.jackson.annotation.JsonProperty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.commit.ValidationFailure; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.util.JsonSerialization; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.validateCollectionClass; @@ -44,11 +48,22 @@ * Persistent format for multiple pending commits. * Contains 0 or more {@link SinglePendingCommit} entries; validation logic * checks those values on load. + *

    + * The statistics published through the {@link IOStatisticsSource} + * interface are the static ones marshalled with the commit data; + * they may be empty. + *

    + *

    + * As single commits are added via {@link #add(SinglePendingCommit)}, + * any statistics from those commits are merged into the aggregate + * statistics, and those of the single commit cleared. + *
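A short sketch of the aggregation behaviour just described, assuming only the PendingSet and SinglePendingCommit types from this change; the incoming statistics parameter is a stand-in for whatever IOStatistics were gathered while writing.

    import org.apache.hadoop.fs.s3a.commit.files.PendingSet;
    import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit;
    import org.apache.hadoop.fs.statistics.IOStatistics;

    final class PendingSetStatsSketch {
      static PendingSet collect(SinglePendingCommit commit, IOStatistics streamStats) {
        commit.getIOStatistics().aggregate(streamStats);  // stats captured by the writer
        PendingSet pendingSet = new PendingSet();
        pendingSet.add(commit);
        // the aggregate now lives in pendingSet.getIOStatistics();
        // the per-commit snapshot was cleared by add() to avoid double counting
        return pendingSet;
      }
    }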

    */ @SuppressWarnings("unused") @InterfaceAudience.Private @InterfaceStability.Unstable -public class PendingSet extends PersistentCommitData { +public class PendingSet extends PersistentCommitData + implements IOStatisticsSource { private static final Logger LOG = LoggerFactory.getLogger(PendingSet.class); /** @@ -56,7 +71,7 @@ public class PendingSet extends PersistentCommitData { * If this is changed the value of {@link #serialVersionUID} will change, * to avoid deserialization problems. */ - public static final int VERSION = 1; + public static final int VERSION = 3; /** * Serialization ID: {@value}. @@ -67,6 +82,9 @@ public class PendingSet extends PersistentCommitData { /** Version marker. */ private int version = VERSION; + /** Job ID, if known. */ + private String jobId = ""; + /** * Commit list. */ @@ -77,6 +95,12 @@ public class PendingSet extends PersistentCommitData { */ private final Map extraData = new HashMap<>(0); + /** + * IOStatistics. + */ + @JsonProperty("iostatistics") + private IOStatisticsSnapshot iostats = new IOStatisticsSnapshot(); + public PendingSet() { this(0); } @@ -110,12 +134,31 @@ public static PendingSet load(FileSystem fs, Path path) return instance; } + /** + * Load an instance from a file, then validate it. + * @param fs filesystem + * @param status status of file to load + * @return the loaded instance + * @throws IOException IO failure + * @throws ValidationFailure if the data is invalid + */ + public static PendingSet load(FileSystem fs, FileStatus status) + throws IOException { + return load(fs, status.getPath()); + } + /** * Add a commit. * @param commit the single commit */ public void add(SinglePendingCommit commit) { commits.add(commit); + // add any statistics. + IOStatisticsSnapshot st = commit.getIOStatistics(); + if (st != null) { + iostats.aggregate(st); + st.clear(); + } } /** @@ -189,4 +232,31 @@ public List getCommits() { public void setCommits(List commits) { this.commits = commits; } + + /** + * Set/Update an extra data entry. + * @param key key + * @param value value + */ + public void putExtraData(String key, String value) { + extraData.put(key, value); + } + + /** @return Job ID, if known. */ + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + @Override + public IOStatisticsSnapshot getIOStatistics() { + return iostats; + } + + public void setIOStatistics(final IOStatisticsSnapshot ioStatistics) { + this.iostats = ioStatistics; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PersistentCommitData.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PersistentCommitData.java index cc27d079075dc..dba44b9a011d9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PersistentCommitData.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/PersistentCommitData.java @@ -40,7 +40,7 @@ public abstract class PersistentCommitData implements Serializable { * If this is changed the value of {@code serialVersionUID} will change, * to avoid deserialization problems. */ - public static final int VERSION = 1; + public static final int VERSION = 2; /** * Validate the data: those fields which must be non empty, must be set. 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java index 596dd95685c8b..021cc7b34b1f5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java @@ -31,7 +31,9 @@ import java.util.Map; import com.amazonaws.services.s3.model.PartETag; -import com.google.common.base.Preconditions; +import com.fasterxml.jackson.annotation.JsonProperty; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; @@ -39,6 +41,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.commit.ValidationFailure; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.util.JsonSerialization; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.validateCollectionClass; @@ -46,20 +50,25 @@ import static org.apache.hadoop.util.StringUtils.join; /** - * This is the serialization format for uploads yet to be committerd. - * + * This is the serialization format for uploads yet to be committed. + *

    * It's marked as {@link Serializable} so that it can be passed in RPC * calls; for this to work it relies on the fact that java.io ArrayList * and LinkedList are serializable. If any other list type is used for etags, * it must also be serialized. Jackson expects lists, and it is used * to persist to disk. - * + *

    + *

    + * The statistics published through the {@link IOStatisticsSource} + * interface are the static ones marshalled with the commit data; + * they may be empty. + *

    */ @SuppressWarnings("unused") @InterfaceAudience.Private @InterfaceStability.Unstable public class SinglePendingCommit extends PersistentCommitData - implements Iterable { + implements Iterable, IOStatisticsSource { /** * Serialization ID: {@value}. @@ -113,6 +122,12 @@ public class SinglePendingCommit extends PersistentCommitData */ private Map extraData = new HashMap<>(0); + /** + * IOStatistics. + */ + @JsonProperty("iostatistics") + private IOStatisticsSnapshot iostats = new IOStatisticsSnapshot(); + /** Destination file size. */ private long length; @@ -207,7 +222,7 @@ public void validate() throws ValidationFailure { @Override public String toString() { final StringBuilder sb = new StringBuilder( - "DelayedCompleteData{"); + "SinglePendingCommit{"); sb.append("version=").append(version); sb.append(", uri='").append(uri).append('\''); sb.append(", destination='").append(destinationKey).append('\''); @@ -418,6 +433,15 @@ public void setExtraData(Map extraData) { this.extraData = extraData; } + /** + * Set/Update an extra data entry. + * @param key key + * @param value value + */ + public void putExtraData(String key, String value) { + extraData.put(key, value); + } + /** * Destination file size. * @return size of destination object @@ -429,4 +453,13 @@ public long getLength() { public void setLength(long length) { this.length = length; } + + @Override + public IOStatisticsSnapshot getIOStatistics() { + return iostats; + } + + public void setIOStatistics(final IOStatisticsSnapshot ioStatistics) { + this.iostats = ioStatistics; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java index e0273fa11a584..4500e1429e447 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; +import com.fasterxml.jackson.annotation.JsonProperty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,6 +35,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.commit.ValidationFailure; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.util.JsonSerialization; /** @@ -62,20 +65,29 @@ @SuppressWarnings("unused") @InterfaceAudience.Private @InterfaceStability.Unstable -public class SuccessData extends PersistentCommitData { +public class SuccessData extends PersistentCommitData + implements IOStatisticsSource { + private static final Logger LOG = LoggerFactory.getLogger(SuccessData.class); + /** + * Supported version value: {@value}. + * If this is changed the value of {@link #serialVersionUID} will change, + * to avoid deserialization problems. + */ + public static final int VERSION = 1; + /** * Serialization ID: {@value}. */ - private static final long serialVersionUID = 507133045258460084L; + private static final long serialVersionUID = 507133045258460083L + VERSION; /** * Name to include in persisted data, so as to differentiate from * any other manifests: {@value}. */ public static final String NAME - = "org.apache.hadoop.fs.s3a.commit.files.SuccessData/1"; + = "org.apache.hadoop.fs.s3a.commit.files.SuccessData/" + VERSION; /** * Name of file; includes version marker. 
@@ -103,6 +115,14 @@ public class SuccessData extends PersistentCommitData { */ private String description; + /** Job ID, if known. */ + private String jobId = ""; + + /** + * Source of the job ID. + */ + private String jobIdSource = ""; + /** * Metrics. */ @@ -118,6 +138,12 @@ public class SuccessData extends PersistentCommitData { */ private List filenames = new ArrayList<>(0); + /** + * IOStatistics. + */ + @JsonProperty("iostatistics") + private IOStatisticsSnapshot iostats = new IOStatisticsSnapshot(); + @Override public void validate() throws ValidationFailure { ValidationFailure.verify(name != null, @@ -325,4 +351,30 @@ public void setDiagnostics(Map diagnostics) { public void addDiagnostic(String key, String value) { diagnostics.put(key, value); } + + /** @return Job ID, if known. */ + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public String getJobIdSource() { + return jobIdSource; + } + + public void setJobIdSource(final String jobIdSource) { + this.jobIdSource = jobIdSource; + } + + @Override + public IOStatisticsSnapshot getIOStatistics() { + return iostats; + } + + public void setIOStatistics(final IOStatisticsSnapshot ioStatistics) { + this.iostats = ioStatistics; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java index a619fc7b7d94e..c3a70bffac057 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java @@ -20,11 +20,13 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import com.amazonaws.services.s3.model.PartETag; import com.amazonaws.services.s3.model.PutObjectRequest; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,6 +36,10 @@ import org.apache.hadoop.fs.s3a.WriteOperationHelper; import org.apache.hadoop.fs.s3a.commit.PutTracker; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; + +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.X_HEADER_MAGIC_MARKER; /** * Put tracker for Magic commits. @@ -102,6 +108,7 @@ public boolean outputImmediatelyVisible() { * @param uploadId Upload ID * @param parts list of parts * @param bytesWritten bytes written + * @param iostatistics nullable IO statistics * @return false, indicating that the commit must fail. * @throws IOException any IO problem. 
* @throws IllegalArgumentException bad argument @@ -109,7 +116,8 @@ public boolean outputImmediatelyVisible() { @Override public boolean aboutToComplete(String uploadId, List parts, - long bytesWritten) + long bytesWritten, + final IOStatistics iostatistics) throws IOException { Preconditions.checkArgument(StringUtils.isNotEmpty(uploadId), "empty/null upload ID: "+ uploadId); @@ -117,6 +125,8 @@ public boolean aboutToComplete(String uploadId, "No uploaded parts list"); Preconditions.checkArgument(!parts.isEmpty(), "No uploaded parts to save"); + + // build the commit summary SinglePendingCommit commitData = new SinglePendingCommit(); commitData.touch(System.currentTimeMillis()); commitData.setDestinationKey(getDestKey()); @@ -126,23 +136,29 @@ public boolean aboutToComplete(String uploadId, commitData.setText(""); commitData.setLength(bytesWritten); commitData.bindCommitData(parts); + commitData.setIOStatistics( + new IOStatisticsSnapshot(iostatistics)); byte[] bytes = commitData.toBytes(); LOG.info("Uncommitted data pending to file {};" - + " commit metadata for {} parts in {}. sixe: {} byte(s)", + + " commit metadata for {} parts in {}. size: {} byte(s)", path.toUri(), parts.size(), pendingPartKey, bytesWritten); LOG.debug("Closed MPU to {}, saved commit information to {}; data=:\n{}", path, pendingPartKey, commitData); PutObjectRequest put = writer.createPutObjectRequest( pendingPartKey, new ByteArrayInputStream(bytes), - bytes.length); + bytes.length, null); writer.uploadObject(put); + // Add the final file length as a header + Map headers = new HashMap<>(); + headers.put(X_HEADER_MAGIC_MARKER, Long.toString(bytesWritten)); // now put a 0-byte file with the name of the original under-magic path PutObjectRequest originalDestPut = writer.createPutObjectRequest( originalDestKey, new ByteArrayInputStream(EMPTY), - 0); + 0, + headers); writer.uploadObject(originalDestPut); return false; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java index 99121730644b1..34bbfd4ed76e2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicS3GuardCommitter.java @@ -37,15 +37,18 @@ import org.apache.hadoop.fs.s3a.commit.CommitUtilsWithMR; import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; +import org.apache.hadoop.fs.statistics.IOStatisticsLogging; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.util.DurationInfo; import static org.apache.hadoop.fs.s3a.S3AUtils.*; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.TASK_ATTEMPT_ID; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.*; import static org.apache.hadoop.fs.s3a.commit.MagicCommitPaths.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtilsWithMR.*; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatistics; /** * This is a dedicated committer which requires the "magic" directory feature @@ -96,6 +99,7 @@ protected boolean requiresDelayedCommitOutputInFileSystem() { public void setupJob(JobContext context) throws IOException { try (DurationInfo d = new DurationInfo(LOG, "Setup 
Job %s", jobIdString(context))) { + super.setupJob(context); Path jobAttemptPath = getJobAttemptPath(context); getDestinationFS(jobAttemptPath, context.getConfiguration()).mkdirs(jobAttemptPath); @@ -130,16 +134,6 @@ public void cleanupStagingDirs() { } } - @Override - public void setupTask(TaskAttemptContext context) throws IOException { - try (DurationInfo d = new DurationInfo(LOG, - "Setup Task %s", context.getTaskAttemptID())) { - Path taskAttemptPath = getTaskAttemptPath(context); - FileSystem fs = taskAttemptPath.getFileSystem(getConf()); - fs.mkdirs(taskAttemptPath); - } - } - /** * Did this task write any files in the work directory? * Probes for a task existing by looking to see if the attempt dir exists. @@ -177,6 +171,8 @@ public void commitTask(TaskAttemptContext context) throws IOException { destroyThreadPool(); } getCommitOperations().taskCompleted(true); + LOG.debug("aggregate statistics\n{}", + demandStringifyIOStatistics(getIOStatistics())); } /** @@ -207,21 +203,25 @@ private PendingSet innerCommitTask( throw failures.get(0).getValue(); } // patch in IDs - String jobId = String.valueOf(context.getJobID()); + String jobId = getUUID(); String taskId = String.valueOf(context.getTaskAttemptID()); for (SinglePendingCommit commit : pendingSet.getCommits()) { commit.setJobId(jobId); commit.setTaskId(taskId); } - + pendingSet.putExtraData(TASK_ATTEMPT_ID, taskId); + pendingSet.setJobId(jobId); Path jobAttemptPath = getJobAttemptPath(context); TaskAttemptID taskAttemptID = context.getTaskAttemptID(); Path taskOutcomePath = new Path(jobAttemptPath, taskAttemptID.getTaskID().toString() + CommitConstants.PENDINGSET_SUFFIX); LOG.info("Saving work of {} to {}", taskAttemptID, taskOutcomePath); + LOG.debug("task statistics\n{}", + IOStatisticsLogging.demandStringifyIOStatisticsSource(pendingSet)); try { - pendingSet.save(getDestFS(), taskOutcomePath, false); + // We will overwrite if there exists a pendingSet file already + pendingSet.save(getDestFS(), taskOutcomePath, true); } catch (IOException e) { LOG.warn("Failed to save task commit data to {} ", taskOutcomePath, e); @@ -257,11 +257,12 @@ public void abortTask(TaskAttemptContext context) throws IOException { /** * Compute the path where the output of a given job attempt will be placed. + * For the magic committer, the path includes the job UUID. * @param appAttemptId the ID of the application attempt for this job. * @return the path to store job attempt data. */ protected Path getJobAttemptPath(int appAttemptId) { - return getMagicJobAttemptPath(appAttemptId, getOutputPath()); + return getMagicJobAttemptPath(getUUID(), getOutputPath()); } /** @@ -272,12 +273,12 @@ protected Path getJobAttemptPath(int appAttemptId) { * @return the path where a task attempt should be stored. */ public Path getTaskAttemptPath(TaskAttemptContext context) { - return getMagicTaskAttemptPath(context, getOutputPath()); + return getMagicTaskAttemptPath(context, getUUID(), getOutputPath()); } @Override protected Path getBaseTaskAttemptPath(TaskAttemptContext context) { - return getBaseMagicTaskAttemptPath(context, getOutputPath()); + return getBaseMagicTaskAttemptPath(context, getUUID(), getOutputPath()); } /** @@ -287,13 +288,16 @@ protected Path getBaseTaskAttemptPath(TaskAttemptContext context) { * @return a path for temporary data. 
*/ public Path getTempTaskAttemptPath(TaskAttemptContext context) { - return CommitUtilsWithMR.getTempTaskAttemptPath(context, getOutputPath()); + return CommitUtilsWithMR.getTempTaskAttemptPath(context, + getUUID(), + getOutputPath()); } @Override public String toString() { final StringBuilder sb = new StringBuilder( "MagicCommitter{"); + sb.append(super.toString()); sb.append('}'); return sb.toString(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java index 20aca3cf49ae0..214c7abdc732a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitter.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -187,7 +186,7 @@ private void replacePartitions( Map partitions = new ConcurrentHashMap<>(); FileSystem sourceFS = pending.getSourceFS(); - ExecutorService pool = buildThreadPool(context); + Tasks.Submitter submitter = buildSubmitter(context); try (DurationInfo ignored = new DurationInfo(LOG, "Replacing partitions")) { @@ -198,9 +197,10 @@ private void replacePartitions( Tasks.foreach(pending.getSourceFiles()) .stopOnFailure() .suppressExceptions(false) - .executeWith(pool) - .run(path -> { - PendingSet pendingSet = PendingSet.load(sourceFS, path); + .executeWith(submitter) + .run(status -> { + PendingSet pendingSet = PendingSet.load(sourceFS, + status); Path lastParent = null; for (SinglePendingCommit commit : pendingSet.getCommits()) { Path parent = commit.destinationPath().getParent(); @@ -216,7 +216,7 @@ private void replacePartitions( Tasks.foreach(partitions.keySet()) .stopOnFailure() .suppressExceptions(false) - .executeWith(pool) + .executeWith(submitter) .run(partitionPath -> { LOG.debug("{}: removing partition path to be replaced: " + getRole(), partitionPath); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java index a941572f1e7d6..5f9e6e21363ee 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java @@ -23,12 +23,12 @@ import java.util.Set; import java.util.concurrent.ExecutionException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.UncheckedExecutionException; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.UncheckedExecutionException; import 
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -43,6 +43,7 @@ import org.apache.hadoop.security.UserGroupInformation; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.JAVA_IO_TMPDIR; import static org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterConstants.*; /** @@ -141,14 +142,18 @@ public static Path path(Path parent, String... child) { } /** - * A cache of temporary folders. There's a risk here that the cache - * gets too big + * A cache of temporary folders, using a generated ID which must be unique for + * each active task attempt. */ - private static Cache tempFolders = CacheBuilder + private static Cache tempFolders = CacheBuilder .newBuilder().build(); /** * Get the task attempt temporary directory in the local filesystem. + * This must be unique to all tasks on all jobs running on all processes + * on this host. + * It's constructed as uuid+task-attempt-ID, relying on UUID to be unique + * for each job. * @param conf configuration * @param uuid some UUID, such as a job UUID * @param attemptID attempt ID @@ -162,10 +167,11 @@ public static Path getLocalTaskAttemptTempDir(final Configuration conf, try { final LocalDirAllocator allocator = new LocalDirAllocator(Constants.BUFFER_DIR); - return tempFolders.get(attemptID, + String name = uuid + "-" + attemptID; + return tempFolders.get(name, () -> { return FileSystem.getLocal(conf).makeQualified( - allocator.getLocalPathForWrite(uuid, conf)); + allocator.getLocalPathForWrite(name, conf)); }); } catch (ExecutionException | UncheckedExecutionException e) { Throwable cause = e.getCause(); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java index 7eca1b42659e5..59114f7ab7340 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitter.java @@ -26,7 +26,6 @@ import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; -import com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,16 +45,14 @@ import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.mapreduce.JobContext; -import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter; import org.apache.hadoop.util.DurationInfo; -import static com.google.common.base.Preconditions.*; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.Invoker.*; -import static org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterConstants.*; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitUtilsWithMR.*; @@ -95,7 +92,6 @@ public class StagingCommitter extends AbstractS3ACommitter { public static final String NAME = "staging"; private final Path constructorOutputPath; private final long uploadPartSize; - private final 
String uuid; private final boolean uniqueFilenames; private final FileOutputCommitter wrappedCommitter; @@ -118,15 +114,14 @@ public StagingCommitter(Path outputPath, Configuration conf = getConf(); this.uploadPartSize = conf.getLongBytes( MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE); - this.uuid = getUploadUUID(conf, context.getJobID()); this.uniqueFilenames = conf.getBoolean( FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES, DEFAULT_STAGING_COMMITTER_UNIQUE_FILENAMES); - setWorkPath(buildWorkPath(context, uuid)); + setWorkPath(buildWorkPath(context, getUUID())); this.wrappedCommitter = createWrappedCommitter(context, conf); setOutputPath(constructorOutputPath); Path finalOutputPath = getOutputPath(); - Preconditions.checkNotNull(finalOutputPath, "Output path cannot be null"); + checkNotNull(finalOutputPath, "Output path cannot be null"); S3AFileSystem fs = getS3AFileSystem(finalOutputPath, context.getConfiguration(), false); s3KeyPrefix = fs.pathToKey(finalOutputPath); @@ -156,7 +151,8 @@ protected FileOutputCommitter createWrappedCommitter(JobContext context, // explicitly choose commit algorithm initFileOutputCommitterOptions(context); - commitsDirectory = Paths.getMultipartUploadCommitsDirectory(conf, uuid); + commitsDirectory = Paths.getMultipartUploadCommitsDirectory(conf, + getUUID()); return new FileOutputCommitter(commitsDirectory, context); } @@ -175,7 +171,10 @@ protected void initFileOutputCommitterOptions(JobContext context) { public String toString() { final StringBuilder sb = new StringBuilder("StagingCommitter{"); sb.append(super.toString()); + sb.append(", commitsDirectory=").append(commitsDirectory); + sb.append(", uniqueFilenames=").append(uniqueFilenames); sb.append(", conflictResolution=").append(conflictResolution); + sb.append(", uploadPartSize=").append(uploadPartSize); if (wrappedCommitter != null) { sb.append(", wrappedCommitter=").append(wrappedCommitter); } @@ -183,40 +182,6 @@ public String toString() { return sb.toString(); } - /** - * Get the UUID of an upload; may be the job ID. - * Spark will use a fake app ID based on the current minute and job ID 0. - * To avoid collisions, the key policy is: - *
- * <ol>
- *   <li>Value of {@link InternalCommitterConstants#FS_S3A_COMMITTER_STAGING_UUID}.</li>
- *   <li>Value of {@code "spark.sql.sources.writeJobUUID"}.</li>
- *   <li>Value of {@code "spark.app.id"}.</li>
- *   <li>JobId passed in.</li>
- * </ol>
    - * The staging UUID is set in in {@link #setupJob(JobContext)} and so will - * be valid in all sequences where the job has been set up for the - * configuration passed in. - * @param conf job/task configuration - * @param jobId Job ID - * @return an ID for use in paths. - */ - public static String getUploadUUID(Configuration conf, String jobId) { - return conf.getTrimmed( - InternalCommitterConstants.FS_S3A_COMMITTER_STAGING_UUID, - conf.getTrimmed(SPARK_WRITE_UUID, - conf.getTrimmed(SPARK_APP_ID, jobId))); - } - - /** - * Get the UUID of a Job. - * @param conf job/task configuration - * @param jobId Job ID - * @return an ID for use in paths. - */ - public static String getUploadUUID(Configuration conf, JobID jobId) { - return getUploadUUID(conf, jobId.toString()); - } - /** * Get the work path for a task. * @param context job/task complex @@ -309,7 +274,7 @@ public static Path getTaskAttemptPath(TaskAttemptContext context, Path out) { * @return the location of pending job attempts. */ private static Path getPendingJobAttemptsPath(Path out) { - Preconditions.checkNotNull(out, "Null 'out' path"); + checkNotNull(out, "Null 'out' path"); return new Path(out, TEMPORARY); } @@ -330,12 +295,12 @@ public Path getCommittedTaskPath(TaskAttemptContext context) { * @param context task context */ private static void validateContext(TaskAttemptContext context) { - Preconditions.checkNotNull(context, "null context"); - Preconditions.checkNotNull(context.getTaskAttemptID(), + checkNotNull(context, "null context"); + checkNotNull(context.getTaskAttemptID(), "null task attempt ID"); - Preconditions.checkNotNull(context.getTaskAttemptID().getTaskID(), + checkNotNull(context.getTaskAttemptID().getTaskID(), "null task ID"); - Preconditions.checkNotNull(context.getTaskAttemptID().getJobID(), + checkNotNull(context.getTaskAttemptID().getJobID(), "null job ID"); } @@ -377,7 +342,7 @@ protected List getTaskOutput(TaskAttemptContext context) // get files on the local FS in the attempt path Path attemptPath = getTaskAttemptPath(context); - Preconditions.checkNotNull(attemptPath, + checkNotNull(attemptPath, "No attemptPath path in {}", this); LOG.debug("Scanning {} for files to commit", attemptPath); @@ -401,7 +366,8 @@ protected List getTaskOutput(TaskAttemptContext context) */ protected String getFinalKey(String relative, JobContext context) { if (uniqueFilenames) { - return getS3KeyPrefix(context) + "/" + Paths.addUUID(relative, uuid); + return getS3KeyPrefix(context) + "/" + + Paths.addUUID(relative, getUUID()); } else { return getS3KeyPrefix(context) + "/" + relative; } @@ -452,11 +418,8 @@ public Path getJobAttemptPath(JobContext context) { */ @Override public void setupJob(JobContext context) throws IOException { - LOG.debug("{}, Setting up job {}", getRole(), jobIdString(context)); - context.getConfiguration().set( - InternalCommitterConstants.FS_S3A_COMMITTER_STAGING_UUID, uuid); - wrappedCommitter.setupJob(context); super.setupJob(context); + wrappedCommitter.setupJob(context); } /** @@ -539,19 +502,6 @@ protected void cleanup(JobContext context, super.cleanup(context, suppressExceptions); } - @Override - protected void abortPendingUploadsInCleanup(boolean suppressExceptions) - throws IOException { - if (getConf() - .getBoolean(FS_S3A_COMMITTER_STAGING_ABORT_PENDING_UPLOADS, true)) { - super.abortPendingUploadsInCleanup(suppressExceptions); - } else { - LOG.info("Not cleanup up pending uploads to {} as {} is false ", - getOutputPath(), - FS_S3A_COMMITTER_STAGING_ABORT_PENDING_UPLOADS); - } - } - 
@Override protected void abortJobInternal(JobContext context, boolean suppressExceptions) throws IOException { @@ -608,8 +558,7 @@ public void setupTask(TaskAttemptContext context) throws IOException { Path taskAttemptPath = getTaskAttemptPath(context); try (DurationInfo d = new DurationInfo(LOG, "%s: setup task attempt path %s ", getRole(), taskAttemptPath)) { - // create the local FS - taskAttemptPath.getFileSystem(getConf()).mkdirs(taskAttemptPath); + super.setupTask(context); wrappedCommitter.setupTask(context); } } @@ -695,11 +644,13 @@ protected int commitTaskInternal(final TaskAttemptContext context, context.progress(); PendingSet pendingCommits = new PendingSet(commitCount); + pendingCommits.putExtraData(TASK_ATTEMPT_ID, + context.getTaskAttemptID().toString()); try { Tasks.foreach(taskOutput) .stopOnFailure() .suppressExceptions(false) - .executeWith(buildThreadPool(context)) + .executeWith(buildSubmitter(context)) .run(stat -> { Path path = stat.getPath(); File localFile = new File(path.toUri().getPath()); @@ -830,15 +781,6 @@ private String getS3KeyPrefix(JobContext context) { return s3KeyPrefix; } - /** - * A UUID for this upload, as calculated with. - * {@link #getUploadUUID(Configuration, String)} - * @return the UUID for files - */ - protected String getUUID() { - return uuid; - } - /** * Returns the {@link ConflictResolution} mode for this commit. * diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitterConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitterConstants.java index c41715bd497d8..ee2b9eca5015d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitterConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/StagingCommitterConstants.java @@ -46,19 +46,4 @@ private StagingCommitterConstants() { */ public static final String STAGING_UPLOADS = "staging-uploads"; - // Spark configuration keys - - /** - * The UUID for jobs: {@value}. - */ - public static final String SPARK_WRITE_UUID = - "spark.sql.sources.writeJobUUID"; - - /** - * The App ID for jobs. - */ - - public static final String SPARK_APP_ID = "spark.app.id"; - - public static final String JAVA_IO_TMPDIR = "java.io.tmpdir"; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java index 904d5f750dbee..98b76b15da48f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/AbstractStoreOperation.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a.impl; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Base class of operations in the store. 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ActiveOperationContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ActiveOperationContext.java new file mode 100644 index 0000000000000..3ad2bbff3ba92 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ActiveOperationContext.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import javax.annotation.Nullable; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; + +/** + * Context for any active operation. + */ +public class ActiveOperationContext { + + /** + * An operation ID; currently just for logging...proper tracing needs more. + */ + private final long operationId; + + /** + * Statistics context. + */ + private final S3AStatisticsContext statisticsContext; + + /** + * S3Guard bulk operation state, if (currently) set. + */ + @Nullable private BulkOperationState bulkOperationState; + + public ActiveOperationContext( + final long operationId, + final S3AStatisticsContext statisticsContext, + @Nullable final BulkOperationState bulkOperationState) { + this.operationId = operationId; + this.statisticsContext = Objects.requireNonNull(statisticsContext, + "null statistics context"); + this.bulkOperationState = bulkOperationState; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "ActiveOperation{"); + sb.append("operationId=").append(operationId); + sb.append(", bulkOperationState=").append(bulkOperationState); + sb.append('}'); + return sb.toString(); + } + + @Nullable + public BulkOperationState getBulkOperationState() { + return bulkOperationState; + } + + public long getOperationId() { + return operationId; + } + + public S3AStatisticsContext getS3AStatisticsContext() { + return statisticsContext; + } + + private static final AtomicLong NEXT_OPERATION_ID = new AtomicLong(0); + + /** + * Create an operation ID. The nature of it should be opaque. + * @return an ID for the constructor. 
+ */ + protected static long newOperationId() { + return NEXT_OPERATION_ID.incrementAndGet(); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/BulkDeleteRetryHandler.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/BulkDeleteRetryHandler.java index b2c1cc6271ec7..4169a9899cb15 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/BulkDeleteRetryHandler.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/BulkDeleteRetryHandler.java @@ -26,9 +26,9 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.s3a.AWSClientIOException; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3AStorageStatistics; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import static org.apache.hadoop.fs.s3a.S3AUtils.isThrottleException; import static org.apache.hadoop.fs.s3a.Statistic.IGNORED_ERRORS; @@ -53,7 +53,7 @@ public class BulkDeleteRetryHandler extends AbstractStoreOperation { */ public static final String XML_PARSE_BROKEN = "Failed to parse XML document"; - private final S3AInstrumentation instrumentation; + private final S3AStatisticsContext instrumentation; private final S3AStorageStatistics storageStatistics; @@ -84,7 +84,6 @@ protected void incrementStatistic(Statistic statistic) { */ protected void incrementStatistic(Statistic statistic, long count) { instrumentation.incrementCounter(statistic, count); - storageStatistics.incrementCounter(statistic, count); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java index 1c61d30b08a60..b788f507735a0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CallableSupplier.java @@ -20,6 +20,7 @@ import javax.annotation.Nullable; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; @@ -31,7 +32,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.impl.WrappedIOException; import org.apache.hadoop.util.DurationInfo; import static org.apache.hadoop.fs.impl.FutureIOSupport.raiseInnerCause; @@ -63,9 +63,9 @@ public Object get() { } catch (RuntimeException e) { throw e; } catch (IOException e) { - throw new WrappedIOException(e); + throw new UncheckedIOException(e); } catch (Exception e) { - throw new WrappedIOException(new IOException(e)); + throw new UncheckedIOException(new IOException(e)); } } @@ -73,7 +73,7 @@ public Object get() { * Submit a callable into a completable future. * RTEs are rethrown. * Non RTEs are caught and wrapped; IOExceptions to - * {@link WrappedIOException} instances. + * {@code RuntimeIOException} instances. * @param executor executor. 
* @param call call to invoke * @param type diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java index fc6fcd31819d7..2d49cc939a41a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeDetectionPolicy.java @@ -25,7 +25,7 @@ import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.transfer.model.CopyResult; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java index d34328ccbfe7c..48050563cde2b 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ChangeTracker.java @@ -18,8 +18,6 @@ package org.apache.hadoop.fs.s3a.impl; -import java.util.concurrent.atomic.AtomicLong; - import com.amazonaws.AmazonServiceException; import com.amazonaws.SdkBaseException; import com.amazonaws.services.s3.model.CopyObjectRequest; @@ -28,7 +26,7 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.transfer.model.CopyResult; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,8 +37,9 @@ import org.apache.hadoop.fs.s3a.NoVersionAttributeException; import org.apache.hadoop.fs.s3a.RemoteFileChangedException; import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.statistics.ChangeTrackerStatistics; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Change tracking for input streams: the version ID or etag of the object is @@ -72,7 +71,7 @@ public class ChangeTracker { * Mismatch counter; expected to be wired up to StreamStatistics except * during testing. */ - private final AtomicLong versionMismatches; + private final ChangeTrackerStatistics versionMismatches; /** * Revision identifier (e.g. eTag or versionId, depending on change @@ -90,7 +89,7 @@ public class ChangeTracker { */ public ChangeTracker(final String uri, final ChangeDetectionPolicy policy, - final AtomicLong versionMismatches, + final ChangeTrackerStatistics versionMismatches, final S3ObjectAttributes s3ObjectAttributes) { this.policy = checkNotNull(policy); this.uri = uri; @@ -111,8 +110,8 @@ public ChangeDetectionPolicy.Source getSource() { } @VisibleForTesting - public AtomicLong getVersionMismatches() { - return versionMismatches; + public long getVersionMismatches() { + return versionMismatches.getVersionMismatches(); } /** @@ -177,7 +176,7 @@ public void processResponse(final S3Object object, if (revisionId != null) { // the requirements of the change detection policy wasn't met: the // object was not returned. 
- versionMismatches.incrementAndGet(); + versionMismatches.versionMismatchError(); throw new RemoteFileChangedException(uri, operation, String.format(CHANGE_REPORTED_BY_S3 + " during %s" @@ -235,7 +234,7 @@ public void processException(SdkBaseException e, String operation) throws // This isn't really going to be hit due to // https://github.com/aws/aws-sdk-java/issues/1644 if (serviceException.getStatusCode() == SC_PRECONDITION_FAILED) { - versionMismatches.incrementAndGet(); + versionMismatches.versionMismatchError(); throw new RemoteFileChangedException(uri, operation, String.format( RemoteFileChangedException.PRECONDITIONS_FAILED + " on %s." @@ -292,10 +291,10 @@ private void processNewRevision(final String newRevisionId, uri, pos, operation, - versionMismatches.get()); + versionMismatches.getVersionMismatches()); if (pair.left) { // an mismatch has occurred: note it. - versionMismatches.incrementAndGet(); + versionMismatches.versionMismatchError(); } if (pair.right != null) { // there's an exception to raise: do it diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ConfigureShadedAWSSocketFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ConfigureShadedAWSSocketFactory.java new file mode 100644 index 0000000000000..8205668e8f354 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ConfigureShadedAWSSocketFactory.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import javax.net.ssl.HostnameVerifier; +import java.io.IOException; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory; + +import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; + +/** + * This interacts with the Shaded httpclient library used in the full + * AWS SDK. If the S3A client is used with the unshaded SDK, this + * class will not link. 
+ */ +public class ConfigureShadedAWSSocketFactory implements + NetworkBinding.ConfigureAWSSocketFactory { + + @Override + public void configureSocketFactory(final ClientConfiguration awsConf, + final DelegatingSSLSocketFactory.SSLChannelMode channelMode) + throws IOException { + DelegatingSSLSocketFactory.initializeDefaultFactory(channelMode); + awsConf.getApacheHttpClientConfig().setSslSocketFactory( + new SSLConnectionSocketFactory( + DelegatingSSLSocketFactory.getDefaultFactory(), + (HostnameVerifier) null)); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java index b10cc6d857ad9..27ac7dec1dd19 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ContextAccessors.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.nio.file.AccessDeniedException; +import com.amazonaws.services.s3.model.ObjectMetadata; + import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.Retries; @@ -73,4 +75,23 @@ public interface ContextAccessors { */ @Retries.RetryTranslated String getBucketLocation() throws IOException; + + /** + * Qualify a path. + * + * @param path path to qualify/normalize + * @return possibly new path. + */ + Path makeQualified(Path path); + + /** + * Retrieve the object metadata. + * + * @param key key to retrieve. + * @return metadata + * @throws IOException IO and object access problems. + */ + @Retries.RetryTranslated + ObjectMetadata getObjectMetadata(String key) throws IOException; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java index daf93d99bd9be..2292179b3fd66 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java @@ -23,10 +23,12 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.DeleteObjectsResult; -import com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +46,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DurationInfo; -import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.maybeAwaitCompletion; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; @@ -152,10 +154,13 @@ public class DeleteOperation extends ExecutingStoreOperation { /** * List of keys built up for the next delete batch. */ - private List keys; + private List keys; /** - * List of paths built up for deletion. + * List of paths built up for incremental deletion on tree delete. + * At the end of the entire delete the full tree is scanned in S3Guard + * and tombstones added. 
For this reason this list of paths must not + * include directory markers, as that will break the scan. */ private List paths; @@ -203,7 +208,8 @@ public DeleteOperation(final StoreContext context, "page size out of range: %s", pageSize); this.pageSize = pageSize; metadataStore = context.getMetadataStore(); - executor = context.createThrottledExecutor(1); + executor = MoreExecutors.listeningDecorator( + context.createThrottledExecutor(1)); } public long getFilesDeleted() { @@ -279,7 +285,7 @@ public Boolean execute() throws IOException { LOG.debug("deleting simple file {}", path); deleteObjectAtPath(path, key, true); } - LOG.debug("Deleted {} files", filesDeleted); + LOG.debug("Deleted {} objects", filesDeleted); return true; } @@ -323,7 +329,7 @@ protected void deleteDirectoryTree(final Path path, // list files including any under tombstones through S3Guard LOG.debug("Getting objects for directory prefix {} to delete", dirKey); final RemoteIterator locatedFiles = - callbacks.listFilesAndEmptyDirectories(path, status, + callbacks.listFilesAndDirectoryMarkers(path, status, false, true); // iterate through and delete. The next() call will block when a new S3 @@ -359,7 +365,10 @@ protected void deleteDirectoryTree(final Path path, while (objects.hasNext()) { // get the next entry in the listing. extraFilesDeleted++; - queueForDeletion(deletionKey(objects.next()), null); + S3AFileStatus next = objects.next(); + LOG.debug("Found Unlisted entry {}", next); + queueForDeletion(deletionKey(next), null, + next.isDirectory()); } if (extraFilesDeleted > 0) { LOG.debug("Raw S3 Scan found {} extra file(s) to delete", @@ -402,7 +411,7 @@ private String deletionKey(final S3AFileStatus stat) { */ private void queueForDeletion( final S3AFileStatus stat) throws IOException { - queueForDeletion(deletionKey(stat), stat.getPath()); + queueForDeletion(deletionKey(stat), stat.getPath(), stat.isDirectory()); } /** @@ -413,14 +422,18 @@ private void queueForDeletion( * * @param key key to delete * @param deletePath nullable path of the key + * @param isDirMarker is the entry a directory? * @throws IOException failure of the previous batch of deletions. */ private void queueForDeletion(final String key, - @Nullable final Path deletePath) throws IOException { + @Nullable final Path deletePath, + boolean isDirMarker) throws IOException { LOG.debug("Adding object to delete: \"{}\"", key); - keys.add(new DeleteObjectsRequest.KeyVersion(key)); + keys.add(new DeleteEntry(key, isDirMarker)); if (deletePath != null) { - paths.add(deletePath); + if (!isDirMarker) { + paths.add(deletePath); + } } if (keys.size() == pageSize) { @@ -484,7 +497,7 @@ private void deleteObjectAtPath( * @return the submitted future or null */ private CompletableFuture submitDelete( - final List keyList, + final List keyList, final List pathList) { if (keyList.isEmpty() && pathList.isEmpty()) { @@ -514,31 +527,62 @@ private CompletableFuture submitDelete( @Retries.RetryTranslated private void asyncDeleteAction( final BulkOperationState state, - final List keyList, + final List keyList, final List pathList, final boolean auditDeletedKeys) throws IOException { + List deletedObjects = new ArrayList<>(); try (DurationInfo ignored = - new DurationInfo(LOG, false, "Delete page of keys")) { + new DurationInfo(LOG, false, + "Delete page of %d keys", keyList.size())) { DeleteObjectsResult result = null; List undeletedObjects = new ArrayList<>(); if (!keyList.isEmpty()) { - result = Invoker.once("Remove S3 Keys", + // first delete the files. 
+ List files = keyList.stream() + .filter(e -> !e.isDirMarker) + .map(e -> e.keyVersion) + .collect(Collectors.toList()); + LOG.debug("Deleting of {} file objects", files.size()); + result = Invoker.once("Remove S3 Files", status.getPath().toString(), () -> callbacks.removeKeys( - keyList, + files, false, undeletedObjects, state, !auditDeletedKeys)); + if (result != null) { + deletedObjects.addAll(result.getDeletedObjects()); + } + // now the dirs + List dirs = keyList.stream() + .filter(e -> e.isDirMarker) + .map(e -> e.keyVersion) + .collect(Collectors.toList()); + LOG.debug("Deleting of {} directory markers", dirs.size()); + // This is invoked with deleteFakeDir = true, so + // S3Guard is not updated. + result = Invoker.once("Remove S3 Dir Markers", + status.getPath().toString(), + () -> callbacks.removeKeys( + dirs, + true, + undeletedObjects, + state, + !auditDeletedKeys)); + if (result != null) { + deletedObjects.addAll(result.getDeletedObjects()); + } } if (!pathList.isEmpty()) { + // delete file paths only. This stops tombstones + // being added until the final directory cleanup + // (HADOOP-17244) metadataStore.deletePaths(pathList, state); } - if (auditDeletedKeys && result != null) { + if (auditDeletedKeys) { // audit the deleted keys - List deletedObjects = - result.getDeletedObjects(); if (deletedObjects.size() != keyList.size()) { // size mismatch LOG.warn("Size mismatch in deletion operation. " @@ -549,7 +593,7 @@ private void asyncDeleteAction( for (DeleteObjectsResult.DeletedObject del : deletedObjects) { keyList.removeIf(kv -> kv.getKey().equals(del.getKey())); } - for (DeleteObjectsRequest.KeyVersion kv : keyList) { + for (DeleteEntry kv : keyList) { LOG.debug("{}", kv.getKey()); } } @@ -557,5 +601,31 @@ private void asyncDeleteAction( } } + /** + * Deletion entry; dir marker state is tracked to control S3Guard + * update policy. + */ + private static final class DeleteEntry { + private final DeleteObjectsRequest.KeyVersion keyVersion; + + private final boolean isDirMarker; + + private DeleteEntry(final String key, final boolean isDirMarker) { + this.keyVersion = new DeleteObjectsRequest.KeyVersion(key); + this.isDirMarker = isDirMarker; + } + + public String getKey() { + return keyVersion.getKey(); + } + + @Override + public String toString() { + return "DeleteEntry{" + + "key='" + getKey() + '\'' + + ", isDirMarker=" + isDirMarker + + '}'; + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirMarkerTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirMarkerTracker.java new file mode 100644 index 0000000000000..ca04fed65a539 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirMarkerTracker.java @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; + +/** + * Tracks directory markers which have been reported in object listings. + * This is needed for auditing and cleanup, including during rename + * operations. + *

    + * Designed to be used while scanning through the results of listObject + * calls, where we assume the results come in alphanumeric sort order + * and parent entries before children. + *

    + * This lets us assume that we can identify all leaf markers as those + * markers which were added to the set of leaf markers and not subsequently + * removed as child entries were discovered. + *

    + * To avoid scanning data structures excessively, the path of the parent + * directory of the last file added is cached. This allows for a + * quick bailout when many children of the same directory are + * returned in a listing. + *
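
Editor's sketch (not part of the patch): a minimal illustration of how a listing scan could feed this tracker, using only the methods declared in this class. The RemoteIterator parameter and the toKey() helper are assumptions made for the example, not APIs added by this change.

  // Editor's sketch: not part of the patch.
  // Assumes imports of org.apache.hadoop.fs.RemoteIterator and java.util.function.Function.
  static DirMarkerTracker scanListing(Path base,
      RemoteIterator<S3ALocatedFileStatus> listing,
      Function<Path, String> toKey) throws IOException {
    DirMarkerTracker tracker = new DirMarkerTracker(base, true);
    while (listing.hasNext()) {
      // entries arrive in sort order: parents before children
      S3ALocatedFileStatus entry = listing.next();
      String key = toKey.apply(entry.getPath());
      if (entry.isDirectory()) {
        // a marker: provisionally a leaf; markers above it become surplus
        tracker.markerFound(entry.getPath(), key, entry);
      } else {
        // a file: all markers above it become surplus
        tracker.fileFound(entry.getPath(), key, entry);
      }
    }
    // getLeafMarkers(): markers never superseded by a child entry.
    // getSurplusMarkers(): markers which had children beneath them.
    return tracker;
  }
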

    + * Consult the directory_markers document for details on this feature, + * including terminology. + */ +public class DirMarkerTracker { + + private static final Logger LOG = + LoggerFactory.getLogger(DirMarkerTracker.class); + + /** + * all leaf markers. + */ + private final Map leafMarkers + = new TreeMap<>(); + + /** + * all surplus markers. + */ + private final Map surplusMarkers + = new TreeMap<>(); + + /** + * Base path of the tracking operation. + */ + private final Path basePath; + + /** + * Should surplus markers be recorded in + * the {@link #surplusMarkers} map? + */ + private final boolean recordSurplusMarkers; + + /** + * last parent directory checked. + */ + private Path lastDirChecked; + + /** + * Count of scans; used for test assertions. + */ + private int scanCount; + + /** + * How many files were found. + */ + private int filesFound; + + /** + * How many markers were found. + */ + private int markersFound; + + /** + * How many objects of any kind were found? + */ + private int objectsFound; + + /** + * Construct. + *

    + * The base path is currently only used for information rather than + * validating paths supplied in other methods. + * @param basePath base path of track + * @param recordSurplusMarkers save surplus markers to a map? + */ + public DirMarkerTracker(final Path basePath, + boolean recordSurplusMarkers) { + this.basePath = basePath; + this.recordSurplusMarkers = recordSurplusMarkers; + } + + /** + * Get the base path of the tracker. + * @return the path + */ + public Path getBasePath() { + return basePath; + } + + /** + * A marker has been found; this may or may not be a leaf. + *

    + * Trigger a move of all markers above it into the surplus map. + * @param path marker path + * @param key object key + * @param source listing source + * @return the surplus markers found. + */ + public List markerFound(Path path, + final String key, + final S3ALocatedFileStatus source) { + markersFound++; + leafMarkers.put(path, new Marker(path, key, source)); + return pathFound(path, key, source); + } + + /** + * A file has been found. Trigger a move of all + * markers above it into the surplus map. + * @param path marker path + * @param key object key + * @param source listing source + * @return the surplus markers found. + */ + public List fileFound(Path path, + final String key, + final S3ALocatedFileStatus source) { + filesFound++; + return pathFound(path, key, source); + } + + /** + * A path has been found. + *

    + * Declare all markers above it as surplus + * @param path marker path + * @param key object key + * @param source listing source + * @return the surplus markers found. + */ + private List pathFound(Path path, + final String key, + final S3ALocatedFileStatus source) { + objectsFound++; + List removed = new ArrayList<>(); + + // all parent entries are superfluous + final Path parent = path.getParent(); + if (parent == null || parent.equals(lastDirChecked)) { + // short cut exit + return removed; + } + removeParentMarkers(parent, removed); + lastDirChecked = parent; + return removed; + } + + /** + * Remove all markers from the path and its parents from the + * {@link #leafMarkers} map. + *

    + * if {@link #recordSurplusMarkers} is true, the marker is + * moved to the surplus map. Not doing this is simply an + * optimisation designed to reduce risk of excess memory consumption + * when renaming (hypothetically) large directory trees. + * @param path path to start at + * @param removed list of markers removed; is built up during the + * recursive operation. + */ + private void removeParentMarkers(final Path path, + List removed) { + if (path == null || path.isRoot()) { + return; + } + scanCount++; + removeParentMarkers(path.getParent(), removed); + final Marker value = leafMarkers.remove(path); + if (value != null) { + // marker is surplus + removed.add(value); + if (recordSurplusMarkers) { + surplusMarkers.put(path, value); + } + } + } + + /** + * Get the map of leaf markers. + * @return all leaf markers. + */ + public Map getLeafMarkers() { + return leafMarkers; + } + + /** + * Get the map of surplus markers. + *

    + * Empty if they were not being recorded. + * @return all surplus markers. + */ + public Map getSurplusMarkers() { + return surplusMarkers; + } + + public Path getLastDirChecked() { + return lastDirChecked; + } + + + /** + * How many objects were found. + * @return count + */ + public int getObjectsFound() { + return objectsFound; + } + + public int getScanCount() { + return scanCount; + } + + public int getFilesFound() { + return filesFound; + } + + public int getMarkersFound() { + return markersFound; + } + + @Override + public String toString() { + return "DirMarkerTracker{" + + "leafMarkers=" + leafMarkers.size() + + ", surplusMarkers=" + surplusMarkers.size() + + ", lastDirChecked=" + lastDirChecked + + ", filesFound=" + filesFound + + ", scanCount=" + scanCount + + '}'; + } + + /** + * Scan the surplus marker list and remove from it all where the directory + * policy says "keep". This is useful when auditing + * @param policy policy to use when auditing markers for + * inclusion/exclusion. + * @return list of markers stripped + */ + public List removeAllowedMarkers(DirectoryPolicy policy) { + List removed = new ArrayList<>(); + Iterator> entries = + surplusMarkers.entrySet().iterator(); + while (entries.hasNext()) { + Map.Entry entry = entries.next(); + Path path = entry.getKey(); + if (policy.keepDirectoryMarkers(path)) { + // there's a match + // remove it from the map. + entries.remove(); + LOG.debug("Removing {}", entry.getValue()); + removed.add(path); + } + } + return removed; + } + + /** + * This is a marker entry stored in the map and + * returned as markers are deleted. + */ + public static final class Marker { + /** Path of the marker. */ + private final Path path; + + /** + * Key in the store. + */ + private final String key; + + /** + * The file status of the marker. + */ + private final S3ALocatedFileStatus status; + + private Marker(final Path path, + final String key, + final S3ALocatedFileStatus status) { + this.path = path; + this.key = key; + this.status = status; + } + + public Path getPath() { + return path; + } + + public String getKey() { + return key; + } + + public S3ALocatedFileStatus getStatus() { + return status; + } + + /** + * Get the version ID of the status object; may be null. + * @return a version ID, if known. + */ + public String getVersionId() { + return status.getVersionId(); + } + + @Override + public String toString() { + return "Marker{" + + "path=" + path + + ", key='" + key + '\'' + + ", status=" + status + + '}'; + } + + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicy.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicy.java new file mode 100644 index 0000000000000..36dd2e4fd2496 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicy.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import org.apache.hadoop.fs.Path; + +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP; + +/** + * Interface for Directory Marker policies to implement. + */ + +public interface DirectoryPolicy { + + + + /** + * Should a directory marker be retained? + * @param path path a file/directory is being created with. + * @return true if the marker MAY be kept, false if it MUST be deleted. + */ + boolean keepDirectoryMarkers(Path path); + + /** + * Get the marker policy. + * @return policy. + */ + MarkerPolicy getMarkerPolicy(); + + /** + * Describe the policy for marker tools and logs. + * @return description of the current policy. + */ + String describe(); + + /** + * Does a specific path have the relevant option. + * This is to be forwarded from the S3AFileSystem.hasPathCapability + * But only for those capabilities related to markers* + * @param path path + * @param capability capability + * @return true if the capability is supported, false if not + * @throws IllegalArgumentException if the capability is unknown. + */ + boolean hasPathCapability(Path path, String capability); + + /** + * Supported retention policies. + */ + enum MarkerPolicy { + + /** + * Delete markers. + *

+ * This is the classic S3A policy. + */ + Delete(DIRECTORY_MARKER_POLICY_DELETE), + + /** + * Keep markers. + *

+ * This is not backwards compatible. + */ + Keep(DIRECTORY_MARKER_POLICY_KEEP), + + /** + * Keep markers in authoritative paths only. + *

    + * This is Not backwards compatible within the + * auth paths, but is outside these. + */ + Authoritative(DIRECTORY_MARKER_POLICY_AUTHORITATIVE); + + /** + * The name of the option as allowed in configuration files + * and marker-aware tooling. + */ + private final String optionName; + + MarkerPolicy(final String optionName) { + this.optionName = optionName; + } + + /** + * Get the option name. + * @return name of the option + */ + public String getOptionName() { + return optionName; + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicyImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicyImpl.java new file mode 100644 index 0000000000000..a1aa2580b655a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicyImpl.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + + +import java.util.EnumSet; +import java.util.Locale; +import java.util.Set; +import java.util.function.Predicate; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; + +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_AWARE; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP; + +/** + * Implementation of directory policy. + */ +public final class DirectoryPolicyImpl + implements DirectoryPolicy { + + private static final Logger LOG = LoggerFactory.getLogger( + DirectoryPolicyImpl.class); + + /** + * Error string when unable to parse the marker policy option. + */ + public static final String UNKNOWN_MARKER_POLICY = + "Unknown policy in " + + DIRECTORY_MARKER_POLICY + ": "; + + /** + * All available policies. 
+ */ + private static final Set AVAILABLE_POLICIES = + EnumSet.allOf(MarkerPolicy.class); + + /** + * Keep all markers. + */ + public static final DirectoryPolicy KEEP = new DirectoryPolicyImpl( + MarkerPolicy.Keep, (p) -> false); + + /** + * Delete all markers. + */ + public static final DirectoryPolicy DELETE = new DirectoryPolicyImpl( + MarkerPolicy.Delete, (p) -> false); + + /** + * Chosen marker policy. + */ + private final MarkerPolicy markerPolicy; + + /** + * Callback to evaluate authoritativeness of a + * path. + */ + private final Predicate authoritativeness; + + /** + * Constructor. + * @param markerPolicy marker policy + * @param authoritativeness function for authoritativeness + */ + public DirectoryPolicyImpl(final MarkerPolicy markerPolicy, + final Predicate authoritativeness) { + this.markerPolicy = markerPolicy; + this.authoritativeness = authoritativeness; + } + + @Override + public boolean keepDirectoryMarkers(final Path path) { + switch (markerPolicy) { + case Keep: + return true; + case Authoritative: + return authoritativeness.test(path); + case Delete: + default: // which cannot happen + return false; + } + } + + @Override + public MarkerPolicy getMarkerPolicy() { + return markerPolicy; + } + + @Override + public String describe() { + return markerPolicy.getOptionName(); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "DirectoryMarkerRetention{"); + sb.append("policy='").append(markerPolicy.getOptionName()).append('\''); + sb.append('}'); + return sb.toString(); + } + + /** + * Return path policy for store and paths. + * @param path path + * @param capability capability + * @return true if a capability is active + */ + @Override + public boolean hasPathCapability(final Path path, final String capability) { + + switch (capability) { + /* + * Marker policy is dynamically determined for the given path. + */ + case STORE_CAPABILITY_DIRECTORY_MARKER_AWARE: + return true; + + case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP: + return markerPolicy == MarkerPolicy.Keep; + + case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE: + return markerPolicy == MarkerPolicy.Delete; + + case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE: + return markerPolicy == MarkerPolicy.Authoritative; + + case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP: + return keepDirectoryMarkers(path); + + case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE: + return !keepDirectoryMarkers(path); + + default: + throw new IllegalArgumentException("Unknown capability " + capability); + } + } + + /** + * Create/Get the policy for this configuration. + * @param conf config + * @param authoritativeness Callback to evaluate authoritativeness of a + * path. + * @return a policy + */ + public static DirectoryPolicy getDirectoryPolicy( + final Configuration conf, + final Predicate authoritativeness) { + DirectoryPolicy policy; + String option = conf.getTrimmed(DIRECTORY_MARKER_POLICY, + DEFAULT_DIRECTORY_MARKER_POLICY); + switch (option.toLowerCase(Locale.ENGLISH)) { + case DIRECTORY_MARKER_POLICY_DELETE: + // backwards compatible. 
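As a usage sketch built only from the classes and constants this patch introduces: a caller resolves the active policy from the configuration, then asks per path whether a marker may be retained. The authoritativeness predicate and the `/tables` path below are illustrative stand-ins; in the filesystem the predicate is backed by the configured authoritative paths.

```java
import java.util.function.Predicate;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy;
import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl;

import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE;

public class MarkerPolicyExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // choose the "authoritative" retention policy
    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_AUTHORITATIVE);

    // stand-in predicate: treat everything under /tables as authoritative
    Predicate<Path> isAuth = p -> p.toUri().getPath().startsWith("/tables");

    DirectoryPolicy policy =
        DirectoryPolicyImpl.getDirectoryPolicy(conf, isAuth);

    System.out.println(policy.describe());
    System.out.println(policy.keepDirectoryMarkers(new Path("/tables/t1"))); // true
    System.out.println(policy.keepDirectoryMarkers(new Path("/tmp/work")));  // false
  }
}
```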
+ LOG.debug("Directory markers will be deleted"); + policy = DELETE; + break; + case DIRECTORY_MARKER_POLICY_KEEP: + LOG.info("Directory markers will be kept"); + policy = KEEP; + break; + case DIRECTORY_MARKER_POLICY_AUTHORITATIVE: + LOG.info("Directory markers will be kept on authoritative" + + " paths"); + policy = new DirectoryPolicyImpl(MarkerPolicy.Authoritative, + authoritativeness); + break; + default: + throw new IllegalArgumentException(UNKNOWN_MARKER_POLICY + option); + } + return policy; + } + + /** + * Enumerate all available policies. + * @return set of the policies. + */ + public static Set availablePolicies() { + return AVAILABLE_POLICIES; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ErrorTranslation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ErrorTranslation.java index d1baf3c898735..f7e06413a3761 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ErrorTranslation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ErrorTranslation.java @@ -54,6 +54,18 @@ public static boolean isUnknownBucket(AmazonServiceException e) { && AwsErrorCodes.E_NO_SUCH_BUCKET.equals(e.getErrorCode()); } + /** + * Does this exception indicate that a reference to an object + * returned a 404. Unknown bucket errors do not match this + * predicate. + * @param e exception. + * @return true if the status code and error code mean that the + * HEAD request returned 404 but the bucket was there. + */ + public static boolean isObjectNotFound(AmazonServiceException e) { + return e.getStatusCode() == SC_404 && !isUnknownBucket(e); + } + /** * AWS error codes explicitly recognized and processes specially; * kept in their own class for isolation. diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java index ac6f6bfeed3c1..ba8e743f293cb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ExecutingStoreOperation.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A subclass of {@link AbstractStoreOperation} which diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java new file mode 100644 index 0000000000000..5efec2b36dafe --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/HeaderProcessing.java @@ -0,0 +1,500 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import javax.annotation.Nullable; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeMap; + +import com.amazonaws.services.s3.Headers; +import com.amazonaws.services.s3.model.ObjectMetadata; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; + +import static org.apache.hadoop.fs.s3a.Constants.XA_HEADER_PREFIX; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_OP_XATTR_LIST; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_XATTR_GET_MAP; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_XATTR_GET_NAMED; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_XATTR_GET_NAMED_MAP; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.X_HEADER_MAGIC_MARKER; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration; + +/** + * Part of the S3A FS where object headers are + * processed. + * Implements all the various XAttr read operations. + * Those APIs all expect byte arrays back. + * Metadata cloning is also implemented here, so as + * to stay in sync with custom header logic. + * + * The standard header names are extracted from the AWS SDK. + * The S3A connector does not (currently) support setting them, + * though it would be possible to do so through the createFile() + * builder API. + */ +public class HeaderProcessing extends AbstractStoreOperation { + + private static final Logger LOG = LoggerFactory.getLogger( + HeaderProcessing.class); + + /** + * An empty buffer. + */ + private static final byte[] EMPTY = new byte[0]; + + + /** + * Standard HTTP header found on some S3 objects: {@value}. + */ + public static final String XA_CACHE_CONTROL = + XA_HEADER_PREFIX + Headers.CACHE_CONTROL; + /** + * Standard HTTP header found on some S3 objects: {@value}. + */ + public static final String XA_CONTENT_DISPOSITION = + XA_HEADER_PREFIX + Headers.CONTENT_DISPOSITION; + + /** + * Standard HTTP header found on some S3 objects: {@value}. + */ + public static final String XA_CONTENT_ENCODING = + XA_HEADER_PREFIX + Headers.CONTENT_ENCODING; + + /** + * Standard HTTP header found on some S3 objects: {@value}. + */ + public static final String XA_CONTENT_LANGUAGE = + XA_HEADER_PREFIX + Headers.CONTENT_LANGUAGE; + + /** + * Length XAttr: {@value}. + */ + public static final String XA_CONTENT_LENGTH = + XA_HEADER_PREFIX + Headers.CONTENT_LENGTH; + + /** + * Standard HTTP header found on some S3 objects: {@value}. + */ + public static final String XA_CONTENT_MD5 = + XA_HEADER_PREFIX + Headers.CONTENT_MD5; + + /** + * Content range: {@value}. + * This is returned on GET requests with ranges. 
+ */ + public static final String XA_CONTENT_RANGE = + XA_HEADER_PREFIX + Headers.CONTENT_RANGE; + + /** + * Content type: may be set when uploading. + * {@value}. + */ + public static final String XA_CONTENT_TYPE = + XA_HEADER_PREFIX + Headers.CONTENT_TYPE; + + /** + * Etag Header {@value}. + * Also accessible via {@code ObjectMetadata.getEtag()}, where + * it can be retrieved via {@code getFileChecksum(path)} if + * the S3A connector is enabled. + */ + public static final String XA_ETAG = XA_HEADER_PREFIX + Headers.ETAG; + + + /** + * last modified XAttr: {@value}. + */ + public static final String XA_LAST_MODIFIED = + XA_HEADER_PREFIX + Headers.LAST_MODIFIED; + + /* AWS Specific Headers. May not be found on other S3 endpoints. */ + + /** + * object archive status; empty if not on S3 Glacier + * (i.e all normal files should be non-archived as + * S3A and applications don't handle archived data) + * Value {@value}. + */ + public static final String XA_ARCHIVE_STATUS = + XA_HEADER_PREFIX + Headers.ARCHIVE_STATUS; + + /** + * Object legal hold status. {@value}. + */ + public static final String XA_OBJECT_LOCK_LEGAL_HOLD_STATUS = + XA_HEADER_PREFIX + Headers.OBJECT_LOCK_LEGAL_HOLD_STATUS; + + /** + * Object lock mode. {@value}. + */ + public static final String XA_OBJECT_LOCK_MODE = + XA_HEADER_PREFIX + Headers.OBJECT_LOCK_MODE; + + /** + * ISO8601 expiry date of object lock hold. {@value}. + */ + public static final String XA_OBJECT_LOCK_RETAIN_UNTIL_DATE = + XA_HEADER_PREFIX + Headers.OBJECT_LOCK_RETAIN_UNTIL_DATE; + + /** + * Replication status for cross-region replicated objects. {@value}. + */ + public static final String XA_OBJECT_REPLICATION_STATUS = + XA_HEADER_PREFIX + Headers.OBJECT_REPLICATION_STATUS; + + /** + * Version ID; empty for non-versioned buckets/data. {@value}. + */ + public static final String XA_S3_VERSION_ID = + XA_HEADER_PREFIX + Headers.S3_VERSION_ID; + + /** + * The server-side encryption algorithm to use + * with AWS-managed keys: {@value}. + */ + public static final String XA_SERVER_SIDE_ENCRYPTION = + XA_HEADER_PREFIX + Headers.SERVER_SIDE_ENCRYPTION; + + /** + * Storage Class XAttr: {@value}. + */ + public static final String XA_STORAGE_CLASS = + XA_HEADER_PREFIX + Headers.STORAGE_CLASS; + + /** + * Standard headers which are retrieved from HEAD Requests + * and set as XAttrs if the response included the relevant header. + */ + public static final String[] XA_STANDARD_HEADERS = { + /* HTTP standard headers */ + XA_CACHE_CONTROL, + XA_CONTENT_DISPOSITION, + XA_CONTENT_ENCODING, + XA_CONTENT_LANGUAGE, + XA_CONTENT_LENGTH, + XA_CONTENT_MD5, + XA_CONTENT_RANGE, + XA_CONTENT_TYPE, + XA_ETAG, + XA_LAST_MODIFIED, + /* aws headers */ + XA_ARCHIVE_STATUS, + XA_OBJECT_LOCK_LEGAL_HOLD_STATUS, + XA_OBJECT_LOCK_MODE, + XA_OBJECT_LOCK_RETAIN_UNTIL_DATE, + XA_OBJECT_REPLICATION_STATUS, + XA_S3_VERSION_ID, + XA_SERVER_SIDE_ENCRYPTION, + XA_STORAGE_CLASS, + }; + + /** + * Content type of generic binary objects. + * This is the default for uploaded objects. + */ + public static final String CONTENT_TYPE_OCTET_STREAM = + "application/octet-stream"; + + /** + * XML content type : {@value}. + * This is application/xml, not text/xml, and is + * what a HEAD of / returns as the type of a root path. + */ + public static final String CONTENT_TYPE_APPLICATION_XML = + "application/xml"; + + /** + * Construct. + * @param storeContext store context. 
+ */ + public HeaderProcessing(final StoreContext storeContext) { + super(storeContext); + } + + /** + * Query the store, get all the headers into a map. Each Header + * has the "header." prefix. + * Caller must have read access. + * The value of each header is the string value of the object + * UTF-8 encoded. + * @param path path of object. + * @param statistic statistic to use for duration tracking. + * @return the headers + * @throws IOException failure, including file not found. + */ + private Map retrieveHeaders( + final Path path, + final Statistic statistic) throws IOException { + StoreContext context = getStoreContext(); + ContextAccessors accessors = context.getContextAccessors(); + String objectKey = accessors.pathToKey(path); + ObjectMetadata md; + String symbol = statistic.getSymbol(); + S3AStatisticsContext instrumentation = context.getInstrumentation(); + try { + md = trackDuration(instrumentation, symbol, () -> + accessors.getObjectMetadata(objectKey)); + } catch (FileNotFoundException e) { + // no entry. It could be a directory, so try again. + md = trackDuration(instrumentation, symbol, () -> + accessors.getObjectMetadata(objectKey + "/")); + } + // all user metadata + Map rawHeaders = md.getUserMetadata(); + Map headers = new TreeMap<>(); + rawHeaders.forEach((key, value) -> + headers.put(XA_HEADER_PREFIX + key, encodeBytes(value))); + + // and add the usual content length &c, if set + maybeSetHeader(headers, XA_CACHE_CONTROL, + md.getCacheControl()); + maybeSetHeader(headers, XA_CONTENT_DISPOSITION, + md.getContentDisposition()); + maybeSetHeader(headers, XA_CONTENT_ENCODING, + md.getContentEncoding()); + maybeSetHeader(headers, XA_CONTENT_LANGUAGE, + md.getContentLanguage()); + maybeSetHeader(headers, XA_CONTENT_LENGTH, + md.getContentLength()); + maybeSetHeader(headers, XA_CONTENT_MD5, + md.getContentMD5()); + maybeSetHeader(headers, XA_CONTENT_RANGE, + md.getContentRange()); + maybeSetHeader(headers, XA_CONTENT_TYPE, + md.getContentType()); + maybeSetHeader(headers, XA_ETAG, + md.getETag()); + maybeSetHeader(headers, XA_LAST_MODIFIED, + md.getLastModified()); + + // AWS custom headers + maybeSetHeader(headers, XA_ARCHIVE_STATUS, + md.getArchiveStatus()); + maybeSetHeader(headers, XA_OBJECT_LOCK_LEGAL_HOLD_STATUS, + md.getObjectLockLegalHoldStatus()); + maybeSetHeader(headers, XA_OBJECT_LOCK_MODE, + md.getObjectLockMode()); + maybeSetHeader(headers, XA_OBJECT_LOCK_RETAIN_UNTIL_DATE, + md.getObjectLockRetainUntilDate()); + maybeSetHeader(headers, XA_OBJECT_REPLICATION_STATUS, + md.getReplicationStatus()); + maybeSetHeader(headers, XA_S3_VERSION_ID, + md.getVersionId()); + maybeSetHeader(headers, XA_SERVER_SIDE_ENCRYPTION, + md.getSSEAlgorithm()); + maybeSetHeader(headers, XA_STORAGE_CLASS, + md.getStorageClass()); + maybeSetHeader(headers, XA_STORAGE_CLASS, + md.getReplicationStatus()); + return headers; + } + + /** + * Set a header if the value is non null. + * + * @param headers header map + * @param name header name + * @param value value to encode. + */ + private void maybeSetHeader( + final Map headers, + final String name, + final Object value) { + if (value != null) { + headers.put(name, encodeBytes(value)); + } + } + + /** + * Stringify an object and return its bytes in UTF-8 encoding. + * @param s source + * @return encoded object or an empty buffer + */ + public static byte[] encodeBytes(@Nullable Object s) { + return s == null + ? EMPTY + : s.toString().getBytes(StandardCharsets.UTF_8); + } + + /** + * Get the string value from the bytes. 
+ * if null : return null, otherwise the UTF-8 decoded + * bytes. + * @param bytes source bytes + * @return decoded value + */ + public static String decodeBytes(byte[] bytes) { + return bytes == null + ? null + : new String(bytes, StandardCharsets.UTF_8); + } + + /** + * Get an XAttr name and value for a file or directory. + * @param path Path to get extended attribute + * @param name XAttr name. + * @return byte[] XAttr value or null + * @throws IOException IO failure + */ + public byte[] getXAttr(Path path, String name) throws IOException { + return retrieveHeaders(path, INVOCATION_XATTR_GET_NAMED).get(name); + } + + /** + * See {@code FileSystem.getXAttrs(path}. + * + * @param path Path to get extended attributes + * @return Map describing the XAttrs of the file or directory + * @throws IOException IO failure + */ + public Map getXAttrs(Path path) throws IOException { + return retrieveHeaders(path, INVOCATION_XATTR_GET_MAP); + } + + /** + * See {@code FileSystem.listXAttrs(path)}. + * @param path Path to get extended attributes + * @return List of supported XAttrs + * @throws IOException IO failure + */ + public List listXAttrs(final Path path) throws IOException { + return new ArrayList<>(retrieveHeaders(path, INVOCATION_OP_XATTR_LIST) + .keySet()); + } + + /** + * See {@code FileSystem.getXAttrs(path, names}. + * @param path Path to get extended attributes + * @param names XAttr names. + * @return Map describing the XAttrs of the file or directory + * @throws IOException IO failure + */ + public Map getXAttrs(Path path, List names) + throws IOException { + Map headers = retrieveHeaders(path, + INVOCATION_XATTR_GET_NAMED_MAP); + Map result = new TreeMap<>(); + headers.entrySet().stream() + .filter(entry -> names.contains(entry.getKey())) + .forEach(entry -> result.put(entry.getKey(), entry.getValue())); + return result; + } + + /** + * Convert an XAttr byte array to a long. + * testability. + * @param data data to parse + * @return either a length or none + */ + public static Optional extractXAttrLongValue(byte[] data) { + String xAttr; + xAttr = HeaderProcessing.decodeBytes(data); + if (StringUtils.isNotEmpty(xAttr)) { + try { + long l = Long.parseLong(xAttr); + if (l >= 0) { + return Optional.of(l); + } + } catch (NumberFormatException ex) { + LOG.warn("Not a number: {}", xAttr, ex); + } + } + // missing/empty header or parse failure. + return Optional.empty(); + } + + /** + * Creates a copy of the passed {@link ObjectMetadata}. + * Does so without using the {@link ObjectMetadata#clone()} method, + * to avoid copying unnecessary headers. + * This operation does not copy the {@code X_HEADER_MAGIC_MARKER} + * header to avoid confusion. If a marker file is renamed, + * it loses information about any remapped file. + * If new fields are added to ObjectMetadata which are not + * present in the user metadata headers, they will not be picked + * up or cloned unless this operation is updated. + * @param source the {@link ObjectMetadata} to copy + * @param dest the metadata to update; this is the return value. 
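For callers, the practical effect is that object headers surface as read-only XAttrs carrying the "header." prefix described above. A hedged sketch follows: the bucket/path are hypothetical, it assumes the filesystem resolved for the path is the S3A connector with this patch applied, and the exact attribute name is derived from the HTTP header name, so "header.Content-Length" is an assumption about that mapping rather than a constant taken from the patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.impl.HeaderProcessing;

public class XAttrHeaderExample {
  public static void main(String[] args) throws Exception {
    Path file = new Path("s3a://example-bucket/data/file.csv"); // hypothetical path
    FileSystem fs = file.getFileSystem(new Configuration());

    // list all XAttr names published for the object ("header." entries)
    fs.listXAttrs(file).forEach(System.out::println);

    // fetch a single header; values come back UTF-8 encoded
    byte[] raw = fs.getXAttr(file, "header.Content-Length");
    System.out.println("as string: " + HeaderProcessing.decodeBytes(raw));

    // or parse it as a long, tolerating missing or non-numeric values
    HeaderProcessing.extractXAttrLongValue(raw)
        .ifPresent(len -> System.out.println("length=" + len));
  }
}
```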
+ */ + public void cloneObjectMetadata(ObjectMetadata source, + ObjectMetadata dest) { + + // Possibly null attributes + // Allowing nulls to pass breaks it during later use + if (source.getCacheControl() != null) { + dest.setCacheControl(source.getCacheControl()); + } + if (source.getContentDisposition() != null) { + dest.setContentDisposition(source.getContentDisposition()); + } + if (source.getContentEncoding() != null) { + dest.setContentEncoding(source.getContentEncoding()); + } + if (source.getContentMD5() != null) { + dest.setContentMD5(source.getContentMD5()); + } + if (source.getContentType() != null) { + dest.setContentType(source.getContentType()); + } + if (source.getExpirationTime() != null) { + dest.setExpirationTime(source.getExpirationTime()); + } + if (source.getExpirationTimeRuleId() != null) { + dest.setExpirationTimeRuleId(source.getExpirationTimeRuleId()); + } + if (source.getHttpExpiresDate() != null) { + dest.setHttpExpiresDate(source.getHttpExpiresDate()); + } + if (source.getLastModified() != null) { + dest.setLastModified(source.getLastModified()); + } + if (source.getOngoingRestore() != null) { + dest.setOngoingRestore(source.getOngoingRestore()); + } + if (source.getRestoreExpirationTime() != null) { + dest.setRestoreExpirationTime(source.getRestoreExpirationTime()); + } + if (source.getSSEAlgorithm() != null) { + dest.setSSEAlgorithm(source.getSSEAlgorithm()); + } + if (source.getSSECustomerAlgorithm() != null) { + dest.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm()); + } + if (source.getSSECustomerKeyMd5() != null) { + dest.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5()); + } + + // copy user metadata except the magic marker header. + source.getUserMetadata().entrySet().stream() + .filter(e -> !e.getKey().equals(X_HEADER_MAGIC_MARKER)) + .forEach(e -> dest.addUserMetadata(e.getKey(), e.getValue())); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java index c73580d19fccd..a5ce1f68ad3fc 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/InternalConstants.java @@ -23,6 +23,8 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.s3a.Constants; @@ -93,4 +95,20 @@ private InternalConstants() { /** Directory marker attribute: see HADOOP-16613. Value: {@value}. */ public static final String X_DIRECTORY = "application/x-directory"; + + /** + * A configuration option for test use only: maximum + * part count on block writes/uploads. + * Value: {@value}. + */ + @VisibleForTesting + public static final String UPLOAD_PART_COUNT_LIMIT = + "fs.s3a.internal.upload.part.count.limit"; + + /** + * Maximum entries you can upload in a single file write/copy/upload. + * Value: {@value}. 
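To make the part-count cap concrete, here is a small illustrative calculation, not part of the patch: with at most 10,000 parts per upload, the configured part size bounds the largest object a multipart upload can create, and conversely a target object size implies a minimum part size.

```java
public class PartCountArithmetic {
  public static void main(String[] args) {
    final long partLimit = 10_000;               // default part count limit
    final long partSize = 64L * 1024 * 1024;     // example 64 MiB part size

    // largest object writable with this part size
    long maxObject = partLimit * partSize;
    System.out.println("max object size = " + maxObject + " bytes");

    // minimum part size needed for a 5 TiB object under the same cap
    long target = 5L * 1024 * 1024 * 1024 * 1024;
    long minPart = (target + partLimit - 1) / partLimit;  // ceiling division
    System.out.println("minimum part size = " + minPart + " bytes");
  }
}
```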
+ */ + public static final int DEFAULT_UPLOAD_PART_COUNT_LIMIT = 10000; + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java new file mode 100644 index 0000000000000..b0ebf1ca16975 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/ListingOperationCallbacks.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; +import org.apache.hadoop.fs.s3a.S3ListRequest; +import org.apache.hadoop.fs.s3a.S3ListResult; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; + +/** + * These are all the callbacks which + * {@link org.apache.hadoop.fs.s3a.Listing} operations + * need, derived from the actual appropriate S3AFileSystem + * methods. + */ +public interface ListingOperationCallbacks { + + /** + * Initiate a {@code listObjectsAsync} operation, incrementing metrics + * in the process. + * + * Retry policy: retry untranslated. + * @param request request to initiate + * @param trackerFactory tracker with statistics to update + * @return the results + * @throws IOException if the retry invocation raises one (it shouldn't). + */ + @Retries.RetryRaw + CompletableFuture listObjectsAsync( + S3ListRequest request, + DurationTrackerFactory trackerFactory) + throws IOException; + + /** + * List the next set of objects. + * Retry policy: retry untranslated. + * @param request last list objects request to continue + * @param prevResult last paged result to continue from + * @param trackerFactory tracker with statistics to update + * @return the next result object + * @throws IOException none, just there for retryUntranslated. + */ + @Retries.RetryRaw + CompletableFuture continueListObjectsAsync( + S3ListRequest request, + S3ListResult prevResult, + DurationTrackerFactory trackerFactory) + throws IOException; + + /** + * Build a {@link S3ALocatedFileStatus} from a {@link FileStatus} instance. + * @param status file status + * @return a located status with block locations set up from this FS. + * @throws IOException IO Problems. 
+ */ + S3ALocatedFileStatus toLocatedFileStatus( + S3AFileStatus status) + throws IOException; + /** + * Create a {@code ListObjectsRequest} request against this bucket, + * with the maximum keys returned in a query set by + * {@link #getMaxKeys()}. + * @param key key for request + * @param delimiter any delimiter + * @return the request + */ + S3ListRequest createListObjectsRequest( + String key, + String delimiter); + + + /** + * Return the number of bytes that large input files should be optimally + * be split into to minimize I/O time. The given path will be used to + * locate the actual filesystem. The full path does not have to exist. + * @param path path of file + * @return the default block size for the path's filesystem + */ + long getDefaultBlockSize(Path path); + + /** + * Get the maximum key count. + * @return a value, valid after initialization + */ + int getMaxKeys(); + + /** + * Get the updated time provider for the current fs instance. + * @return implementation of {@link ITtlTimeProvider} + */ + ITtlTimeProvider getUpdatedTtlTimeProvider(); + + /** + * Is the path for this instance considered authoritative on the client, + * that is: will listing/status operations only be handled by the metastore, + * with no fallback to S3. + * @param p path + * @return true iff the path is authoritative on the client. + */ + boolean allowAuthoritative(Path p); + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java index e79eeb86b292d..0a8338934c9ef 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MultiObjectDeleteSupport.java @@ -23,12 +23,13 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Objects; import java.util.function.Function; import java.util.stream.Collectors; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,10 +38,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.AWSS3IOException; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.Tristate; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Support for Multi Object Deletion. 
@@ -84,15 +87,25 @@ public MultiObjectDeleteSupport(final StoreContext context, public static IOException translateDeleteException( final String message, final MultiObjectDeleteException deleteException) { + List errors + = deleteException.getErrors(); + LOG.warn("Bulk delete operation failed to delete all objects;" + + " failure count = {}", + errors.size()); final StringBuilder result = new StringBuilder( - deleteException.getErrors().size() * 256); + errors.size() * 256); result.append(message).append(": "); String exitCode = ""; for (MultiObjectDeleteException.DeleteError error : deleteException.getErrors()) { String code = error.getCode(); - result.append(String.format("%s: %s: %s%n", code, error.getKey(), - error.getMessage())); + String item = String.format("%s: %s%s: %s%n", code, error.getKey(), + (error.getVersionId() != null + ? (" (" + error.getVersionId() + ")") + : ""), + error.getMessage()); + LOG.warn(item); + result.append(item); if (exitCode.isEmpty() || ACCESS_DENIED.equals(code)) { exitCode = code; } @@ -113,7 +126,7 @@ public static IOException translateDeleteException( * @param keysToDelete the keys in the delete request * @return tuple of (undeleted, deleted) paths. */ - public Pair, List> splitUndeletedKeys( + public Pair, List> splitUndeletedKeys( final MultiObjectDeleteException deleteException, final Collection keysToDelete) { LOG.debug("Processing delete failure; keys to delete count = {};" @@ -122,11 +135,11 @@ public Pair, List> splitUndeletedKeys( deleteException.getErrors().size(), deleteException.getDeletedObjects().size()); // convert the collection of keys being deleted into paths - final List pathsBeingDeleted = keysToPaths(keysToDelete); - // Take this is list of paths + final List pathsBeingDeleted = keysToKeyPaths(keysToDelete); + // Take this ist of paths // extract all undeleted entries contained in the exception and - // then removes them from the original list. - List undeleted = removeUndeletedPaths(deleteException, + // then remove them from the original list. + List undeleted = removeUndeletedPaths(deleteException, pathsBeingDeleted, getStoreContext()::keyToPath); return Pair.of(undeleted, pathsBeingDeleted); @@ -139,7 +152,17 @@ public Pair, List> splitUndeletedKeys( */ public List keysToPaths( final Collection keysToDelete) { - return convertToPaths(keysToDelete, + return toPathList(keysToKeyPaths(keysToDelete)); + } + + /** + * Given a list of delete requests, convert them all to keypaths. + * @param keysToDelete list of keys for the delete operation. + * @return list of keypath entries + */ + public List keysToKeyPaths( + final Collection keysToDelete) { + return convertToKeyPaths(keysToDelete, getStoreContext()::keyToPath); } @@ -149,13 +172,17 @@ public List keysToPaths( * @param qualifier path qualifier * @return the paths. */ - public static List convertToPaths( + public static List convertToKeyPaths( final Collection keysToDelete, final Function qualifier) { - return keysToDelete.stream() - .map((keyVersion) -> - qualifier.apply(keyVersion.getKey())) - .collect(Collectors.toList()); + List l = new ArrayList<>(keysToDelete.size()); + for (DeleteObjectsRequest.KeyVersion kv : keysToDelete) { + String key = kv.getKey(); + Path p = qualifier.apply(key); + boolean isDir = key.endsWith("/"); + l.add(new KeyPath(key, p, isDir)); + } + return l; } /** @@ -164,27 +191,59 @@ public static List convertToPaths( * and the original list of files to delete declares to have been deleted. * @param deleteException the delete exception. 
* @param keysToDelete collection of keys which had been requested. + * @param retainedMarkers list built up of retained markers. * @return a tuple of (undeleted, deleted, failures) */ public Triple, List, List>> processDeleteFailure( final MultiObjectDeleteException deleteException, - final List keysToDelete) { + final List keysToDelete, + final List retainedMarkers) { final MetadataStore metadataStore = checkNotNull(getStoreContext().getMetadataStore(), "context metadatastore"); final List> failures = new ArrayList<>(); - final Pair, List> outcome = + final Pair, List> outcome = splitUndeletedKeys(deleteException, keysToDelete); - List deleted = outcome.getRight(); - List undeleted = outcome.getLeft(); - // delete the paths but recover - // TODO: handle the case where a parent path is deleted but not a child. - // TODO: in a fake object delete, we don't actually want to delete - // metastore entries - deleted.forEach(path -> { - try { - metadataStore.delete(path, operationState); + List deleted = outcome.getRight(); + List deletedPaths = new ArrayList<>(); + List undeleted = outcome.getLeft(); + retainedMarkers.clear(); + List undeletedPaths = toPathList((List) undeleted); + // sort shorter keys first, + // so that if the left key is longer than the first it is considered + // smaller, so appears in the list first. + // thus when we look for a dir being empty, we know it holds + deleted.sort((l, r) -> r.getKey().length() - l.getKey().length()); + + // now go through and delete from S3Guard all paths listed in + // the result which are either files or directories with + // no children. + deleted.forEach(kp -> { + Path path = kp.getPath(); + try{ + boolean toDelete = true; + if (kp.isDirectoryMarker()) { + // its a dir marker, which could be an empty dir + // (which is then tombstoned), or a non-empty dir, which + // is not tombstoned. + // for this to be handled, we have to have removed children + // from the store first, which relies on the sort + PathMetadata pmentry = metadataStore.get(path, true); + if (pmentry != null && !pmentry.isDeleted()) { + toDelete = pmentry.getFileStatus().isEmptyDirectory() + == Tristate.TRUE; + } else { + toDelete = false; + } + } + if (toDelete) { + LOG.debug("Removing deleted object from S3Guard Store {}", path); + metadataStore.delete(path, operationState); + } else { + LOG.debug("Retaining S3Guard directory entry {}", path); + retainedMarkers.add(path); + } } catch (IOException e) { // trouble: we failed to delete the far end entry // try with the next one. @@ -192,11 +251,25 @@ public static List convertToPaths( LOG.warn("Failed to update S3Guard store with deletion of {}", path); failures.add(Pair.of(path, e)); } + // irrespective of the S3Guard outcome, it is declared as deleted, as + // it is no longer in the S3 store. + deletedPaths.add(path); }); if (LOG.isDebugEnabled()) { undeleted.forEach(p -> LOG.debug("Deleted {}", p)); } - return Triple.of(undeleted, deleted, failures); + return Triple.of(undeletedPaths, deletedPaths, failures); + } + + /** + * Given a list of keypaths, convert to a list of paths. 
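A hedged usage sketch of the error-translation path: the bucket, keys and helper method are hypothetical and `s3` is a plain AWS SDK `AmazonS3` client. When a bulk delete partially fails, the SDK raises `MultiObjectDeleteException`, and the static helper above turns the per-key error list into a single `IOException` whose text names each failed key, its error code and, where present, its version id.

```java
import java.io.IOException;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;

import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport;

public final class BulkDeleteTranslationExample {

  private BulkDeleteTranslationExample() {
  }

  /** Delete a batch of keys, converting partial failures to an IOException. */
  public static void deleteBatch(AmazonS3 s3, String bucket, String... keys)
      throws IOException {
    DeleteObjectsRequest request = new DeleteObjectsRequest(bucket)
        .withKeys(keys);
    try {
      s3.deleteObjects(request);
    } catch (MultiObjectDeleteException e) {
      // build a single exception describing every key which failed
      throw MultiObjectDeleteSupport.translateDeleteException(
          "deleting batch under s3a://" + bucket + "/", e);
    }
  }
}
```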
+ * @param keyPaths source list + * @return a listg of paths + */ + public static List toPathList(final List keyPaths) { + return keyPaths.stream() + .map(KeyPath::getPath) + .collect(Collectors.toList()); } /** @@ -211,8 +284,31 @@ public static List convertToPaths( public static List extractUndeletedPaths( final MultiObjectDeleteException deleteException, final Function qualifierFn) { - return deleteException.getErrors().stream() - .map((e) -> qualifierFn.apply(e.getKey())) + return toPathList(extractUndeletedKeyPaths(deleteException, qualifierFn)); + } + + /** + * Build a list of undeleted paths from a {@code MultiObjectDeleteException}. + * Outside of unit tests, the qualifier function should be + * {@link S3AFileSystem#keyToQualifiedPath(String)}. + * @param deleteException the delete exception. + * @param qualifierFn function to qualify paths + * @return the possibly empty list of paths. + */ + @VisibleForTesting + public static List extractUndeletedKeyPaths( + final MultiObjectDeleteException deleteException, + final Function qualifierFn) { + + List errors + = deleteException.getErrors(); + return errors.stream() + .map((error) -> { + String key = error.getKey(); + Path path = qualifierFn.apply(key); + boolean isDir = key.endsWith("/"); + return new KeyPath(key, path, isDir); + }) .collect(Collectors.toList()); } @@ -227,12 +323,17 @@ public static List extractUndeletedPaths( * @return the list of undeleted entries */ @VisibleForTesting - static List removeUndeletedPaths( + static List removeUndeletedPaths( final MultiObjectDeleteException deleteException, - final Collection pathsBeingDeleted, + final Collection pathsBeingDeleted, final Function qualifier) { - List undeleted = extractUndeletedPaths(deleteException, qualifier); - pathsBeingDeleted.removeAll(undeleted); + // get the undeleted values + List undeleted = extractUndeletedKeyPaths(deleteException, + qualifier); + // and remove them from the undeleted list, matching on key + for (KeyPath undel : undeleted) { + pathsBeingDeleted.removeIf(kp -> kp.getPath().equals(undel.getPath())); + } return undeleted; } @@ -247,4 +348,70 @@ public List processDeleteFailureGenericException(Exception ex, final List keysToDelete) { return keysToPaths(keysToDelete); } + + /** + * Representation of a (key, path) which couldn't be deleted; + * the dir marker flag is inferred from the key suffix. + *

    + * Added because Pairs of Lists of Triples was just too complex + * for Java code. + *
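The marker flag is derived purely from the key, as in `convertToKeyPaths` above; a tiny hedged sketch of that convention follows (the bucket and the qualifier expression are illustrative; the `KeyPath` class itself is defined just below).

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.KeyPath;

public class KeyPathSketch {
  public static void main(String[] args) {
    List<String> keys = Arrays.asList("data/part-0000", "data/", "data/archive/");
    List<KeyPath> keyPaths = keys.stream()
        .map(key -> new KeyPath(
            key,
            new Path("s3a://example-bucket/" + key),   // illustrative qualifier
            key.endsWith("/")))                        // trailing slash == marker
        .collect(Collectors.toList());
    keyPaths.forEach(System.out::println);
  }
}
```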

    + */ + public static final class KeyPath { + /** Key in bucket. */ + private final String key; + /** Full path. */ + private final Path path; + /** Is this a directory marker? */ + private final boolean directoryMarker; + + public KeyPath(final String key, + final Path path, + final boolean directoryMarker) { + this.key = key; + this.path = path; + this.directoryMarker = directoryMarker; + } + + public String getKey() { + return key; + } + + public Path getPath() { + return path; + } + + public boolean isDirectoryMarker() { + return directoryMarker; + } + + @Override + public String toString() { + return "KeyPath{" + + "key='" + key + '\'' + + ", path=" + path + + ", directoryMarker=" + directoryMarker + + '}'; + } + + /** + * Equals test is on key alone. + */ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + KeyPath keyPath = (KeyPath) o; + return key.equals(keyPath.key); + } + + @Override + public int hashCode() { + return Objects.hash(key); + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/NetworkBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/NetworkBinding.java index 7ff44510011c0..409ac7bebc488 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/NetworkBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/NetworkBinding.java @@ -19,91 +19,95 @@ package org.apache.hadoop.fs.s3a.impl; import java.io.IOException; -import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; - -import javax.net.ssl.HostnameVerifier; -import javax.net.ssl.SSLSocketFactory; +import java.net.URI; +import java.net.URISyntaxException; import com.amazonaws.ClientConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_ENDPOINT; import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_SSL_CHANNEL_MODE; +import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT; import static org.apache.hadoop.fs.s3a.Constants.SSL_CHANNEL_MODE; /** * Configures network settings when communicating with AWS services. */ -public class NetworkBinding { +public final class NetworkBinding { private static final Logger LOG = LoggerFactory.getLogger(NetworkBinding.class); - private static final String AWS_SOCKET_FACTORY_CLASSNAME = "com.amazonaws" + - ".thirdparty.apache.http.conn.ssl.SSLConnectionSocketFactory"; + private static final String BINDING_CLASSNAME = "org.apache.hadoop.fs.s3a.impl.ConfigureShadedAWSSocketFactory"; + + private NetworkBinding() { + } /** - * Configures the {@link com.amazonaws.thirdparty.apache.http.conn.ssl - * .SSLConnectionSocketFactory} used by the AWS SDK. A custom - * SSLConnectionSocketFactory can be set using the method - * {@link com.amazonaws.ApacheHttpClientConfig#setSslSocketFactory( - * com.amazonaws.thirdparty.apache.http.conn.socket.ConnectionSocketFactory)}. - * If {@link com.amazonaws.thirdparty.apache.http.conn.ssl - * .SSLConnectionSocketFactory} cannot be found on the classpath, the value - * of {@link org.apache.hadoop.fs.s3a.Constants#SSL_CHANNEL_MODE} is ignored. - * + * Configures the {@code SSLConnectionSocketFactory} used by the AWS SDK. 
+ * A custom Socket Factory can be set using the method + * {@code setSslSocketFactory()}. + * Uses reflection to do this via {@link ConfigureShadedAWSSocketFactory} + * so as to avoid * @param conf the {@link Configuration} used to get the client specified - * value of {@link org.apache.hadoop.fs.s3a.Constants - * #SSL_CHANNEL_MODE} - * @param awsConf the {@link ClientConfiguration} to set the + * value of {@code SSL_CHANNEL_MODE} + * @param awsConf the {@code ClientConfiguration} to set the * SSLConnectionSocketFactory for. * @throws IOException if there is an error while initializing the - * {@link SSLSocketFactory}. + * {@code SSLSocketFactory} other than classloader problems. */ public static void bindSSLChannelMode(Configuration conf, ClientConfiguration awsConf) throws IOException { - try { - // Validate that SSL_CHANNEL_MODE is set to a valid value. - String channelModeString = conf.get( - SSL_CHANNEL_MODE, DEFAULT_SSL_CHANNEL_MODE.name()); - DelegatingSSLSocketFactory.SSLChannelMode channelMode = null; - for (DelegatingSSLSocketFactory.SSLChannelMode mode : - DelegatingSSLSocketFactory.SSLChannelMode.values()) { - if (mode.name().equalsIgnoreCase(channelModeString)) { - channelMode = mode; - } - } - if (channelMode == null) { - throw new IllegalArgumentException(channelModeString + - " is not a valid value for " + SSL_CHANNEL_MODE); + + // Validate that SSL_CHANNEL_MODE is set to a valid value. + String channelModeString = conf.getTrimmed( + SSL_CHANNEL_MODE, DEFAULT_SSL_CHANNEL_MODE.name()); + DelegatingSSLSocketFactory.SSLChannelMode channelMode = null; + for (DelegatingSSLSocketFactory.SSLChannelMode mode : + DelegatingSSLSocketFactory.SSLChannelMode.values()) { + if (mode.name().equalsIgnoreCase(channelModeString)) { + channelMode = mode; } + } + if (channelMode == null) { + throw new IllegalArgumentException(channelModeString + + " is not a valid value for " + SSL_CHANNEL_MODE); + } - // Look for AWS_SOCKET_FACTORY_CLASSNAME on the classpath and instantiate - // an instance using the DelegatingSSLSocketFactory as the - // SSLSocketFactory. - Class sslConnectionSocketFactory = Class.forName( - AWS_SOCKET_FACTORY_CLASSNAME); - Constructor factoryConstructor = - sslConnectionSocketFactory.getDeclaredConstructor( - SSLSocketFactory.class, HostnameVerifier.class); - DelegatingSSLSocketFactory.initializeDefaultFactory(channelMode); - awsConf.getApacheHttpClientConfig().setSslSocketFactory( - (com.amazonaws.thirdparty.apache.http.conn.ssl. - SSLConnectionSocketFactory) factoryConstructor - .newInstance(DelegatingSSLSocketFactory - .getDefaultFactory(), - (HostnameVerifier) null)); + DelegatingSSLSocketFactory.initializeDefaultFactory(channelMode); + try { + // use reflection to load in our own binding class. 
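The pattern used here, loading the shaded-SDK-specific class by name so that a missing or incompatible shaded SDK only disables the feature rather than breaking class-loading, can be sketched generically as follows. The class and interface names are illustrative, not the patch's own.

```java
/** Generic sketch of optional binding through reflection. */
public final class OptionalBinding {

  /** The feature to bind; implemented by a class touching the optional dependency. */
  public interface Binder {
    void bind() throws Exception;
  }

  private OptionalBinding() {
  }

  /**
   * Try to load and invoke a binder class; any classloading or linkage
   * failure simply disables the feature.
   * @param classname name of a class implementing {@link Binder}
   * @return true if the binding was applied
   */
  public static boolean tryBind(String classname) {
    try {
      Class<? extends Binder> clazz =
          Class.forName(classname).asSubclass(Binder.class);
      clazz.getConstructor().newInstance().bind();
      return true;
    } catch (Exception | LinkageError e) {
      // covers ClassNotFoundException, NoSuchMethodException,
      // reflection failures and NoClassDefFoundError from the binder itself
      return false;
    }
  }
}
```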
+ // this is *probably* overkill, but it is how we can be fully confident + // that no attempt will be made to load/link to the AWS Shaded SDK except + // within this try/catch block + Class clazz = + (Class) Class.forName(BINDING_CLASSNAME); + clazz.getConstructor() + .newInstance() + .configureSocketFactory(awsConf, channelMode); } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InstantiationException | - InvocationTargetException e) { + InvocationTargetException | LinkageError e) { LOG.debug("Unable to create class {}, value of {} will be ignored", - AWS_SOCKET_FACTORY_CLASSNAME, SSL_CHANNEL_MODE, e); + BINDING_CLASSNAME, SSL_CHANNEL_MODE, e); } } + /** + * Interface used to bind to the socket factory, allows the code which + * works with the shaded AWS libraries to exist in their own class. + */ + interface ConfigureAWSSocketFactory { + void configureSocketFactory(ClientConfiguration awsConf, + DelegatingSSLSocketFactory.SSLChannelMode channelMode) + throws IOException; + } + /** * Given an S3 bucket region as returned by a bucket location query, * fix it into a form which can be used by other AWS commands. @@ -121,4 +125,30 @@ public static String fixBucketRegion(final String region) { ? "us-east-1" : region; } + + /** + * Log the dns address associated with s3 endpoint. If endpoint is + * not set in the configuration, the {@code Constants#DEFAULT_ENDPOINT} + * will be used. + * @param conf input configuration. + */ + public static void logDnsLookup(Configuration conf) { + String endPoint = conf.getTrimmed(ENDPOINT, DEFAULT_ENDPOINT); + String hostName = endPoint; + if (!endPoint.isEmpty() && LOG.isDebugEnabled()) { + // Updating the hostname if there is a scheme present. + if (endPoint.contains("://")) { + try { + URI uri = new URI(endPoint); + hostName = uri.getHost(); + } catch (URISyntaxException e) { + LOG.debug("Got URISyntaxException, ignoring"); + } + } + LOG.debug("Bucket endpoint : {}, Hostname : {}, DNSAddress : {}", + endPoint, + hostName, + NetUtils.normalizeHostName(hostName)); + } + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java index 0fcf6454c11fb..3391097fc5730 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java @@ -105,7 +105,7 @@ void deleteObjectAtPath(Path path, throws IOException; /** - * Recursive list of files and empty directories. + * Recursive list of files and directory markers. * * @param path path to list from * @param status optional status of path to list. 
@@ -115,7 +115,7 @@ void deleteObjectAtPath(Path path, * @throws IOException failure */ @Retries.RetryTranslated - RemoteIterator listFilesAndEmptyDirectories( + RemoteIterator listFilesAndDirectoryMarkers( Path path, S3AFileStatus status, boolean collectTombstones, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java index 750aebf500a4b..5890ac01a9e74 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java @@ -21,13 +21,14 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; import com.amazonaws.AmazonClientException; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.transfer.model.CopyResult; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,8 +44,9 @@ import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.RenameTracker; import org.apache.hadoop.util.DurationInfo; +import org.apache.hadoop.util.OperationDuration; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_BLOCK_SIZE; import static org.apache.hadoop.fs.s3a.S3AUtils.objectRepresentsDirectory; import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; @@ -55,19 +57,31 @@ /** * A parallelized rename operation which updates the metastore in the * process, through whichever {@link RenameTracker} the store provides. + *

    * The parallel execution is in groups of size * {@link InternalConstants#RENAME_PARALLEL_LIMIT}; it is only * after one group completes that the next group is initiated. + *

    * Once enough files have been copied that they meet the * {@link InternalConstants#MAX_ENTRIES_TO_DELETE} threshold, a delete * is initiated. * If it succeeds, the rename continues with the next group of files. - * + *

    * The RenameTracker has the task of keeping the metastore up to date * as the rename proceeds. - * + *

    + * Directory Markers which have child entries are never copied; only those + * which represent empty directories are copied in the rename. + * The {@link DirMarkerTracker} tracks which markers must be copied, and + * which can simply be deleted from the source. + * As a result: rename always purges all non-leaf directory markers from + * the copied tree. This is to ensure that even if a directory tree + * is copied from an authoritative path to a non-authoritative one + * there is never any contamination of the non-auth path with markers. + *

    * The rename operation implements the classic HDFS rename policy of * rename(file, dir) renames the file under the directory. + *

    * * There is no validation of input and output paths. * Callers are required to themselves verify that destination is not under @@ -183,14 +197,69 @@ private void completeActiveCopies(String reason) throws IOException { /** * Queue an object for deletion. - * @param path path to the object + *
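The grouping described above (copy in bounded parallel groups, accumulate keys, and flush a bulk delete once a threshold is reached) can be sketched independently of the S3A internals. The limits and helper names below are illustrative stand-ins, not the class's own constants or methods.

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative sketch of "copy in groups, delete in pages" batching. */
public abstract class CopyThenDeleteSketch {
  private static final int PARALLEL_LIMIT = 10;     // stand-in group size
  private static final int DELETE_PAGE_SIZE = 1000; // stand-in delete threshold

  private final List<String> pendingCopies = new ArrayList<>();
  private final List<String> keysToDelete = new ArrayList<>();

  /** Submit the copy of one object (asynchronous in the real operation). */
  protected abstract void copy(String sourceKey);

  /** Wait for all submitted copies to complete. */
  protected abstract void awaitCopies(List<String> copies);

  /** Issue one bulk delete request for the given keys. */
  protected abstract void bulkDelete(List<String> keys);

  /** Process one source entry from the listing. */
  public void process(String sourceKey) {
    copy(sourceKey);
    pendingCopies.add(sourceKey);
    keysToDelete.add(sourceKey);
    if (pendingCopies.size() >= PARALLEL_LIMIT) {
      // only after one group completes is the next group started
      awaitCopies(new ArrayList<>(pendingCopies));
      pendingCopies.clear();
    }
    if (keysToDelete.size() >= DELETE_PAGE_SIZE) {
      // deletes only run once the copies they depend on have completed
      awaitCopies(new ArrayList<>(pendingCopies));
      pendingCopies.clear();
      bulkDelete(new ArrayList<>(keysToDelete));
      keysToDelete.clear();
    }
  }

  /** Flush the remaining copies and deletes at the end of the listing. */
  public void finish() {
    awaitCopies(new ArrayList<>(pendingCopies));
    pendingCopies.clear();
    bulkDelete(new ArrayList<>(keysToDelete));
    keysToDelete.clear();
  }
}
```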

    + * This object will be deleted when the next page of objects to delete + * is posted to S3. Therefore, the COPY must have finished + * before that deletion operation takes place. + * This is managed by: + *
+ * <ol>
+ *   <li>The delete operation only being executed once all active
+ *   copies have completed.</li>
+ *   <li>Only queuing objects here whose copy operation has
+ *   been submitted and so is in that thread pool.</li>
+ *   <li>If a path is supplied, then after the delete is executed
+ *   (and completes) the rename tracker from S3Guard will be
+ *   told of its deletion. Do not set this for directory
+ *   markers with children, as it may mistakenly add
+ *   tombstones into the table.</li>
+ * </ol>
    + * This method must only be called from the primary thread. + * @param path path to the object. * @param key key of the object. */ private void queueToDelete(Path path, String key) { - pathsToDelete.add(path); + LOG.debug("Queueing to delete {}", path); + if (path != null) { + pathsToDelete.add(path); + } keysToDelete.add(new DeleteObjectsRequest.KeyVersion(key)); } + /** + * Queue a list of markers for deletion. + *

    + * no-op if the list is empty. + *

    + * See {@link #queueToDelete(Path, String)} for + * details on safe use of this method. + * + * @param markersToDelete markers + */ + private void queueToDelete( + List markersToDelete) { + markersToDelete.forEach(m -> queueToDelete( + null, + m.getKey())); + } + + /** + * Queue a single marker for deletion. + *

    + * See {@link #queueToDelete(Path, String)} for + * details on safe use of this method. + * + * @param marker markers + */ + private void queueToDelete(final DirMarkerTracker.Marker marker) { + queueToDelete(marker.getPath(), marker.getKey()); + } + /** * Block waiting for ay active copies to finish * then delete all queued keys + paths to delete. @@ -225,11 +294,19 @@ public Long execute() throws IOException { storeContext, sourcePath, sourceStatus, destPath); + // The path to whichever file or directory is created by the + // rename. When deleting markers all parents of + // this path will need their markers pruned. + Path destCreated = destPath; // Ok! Time to start try { if (sourceStatus.isFile()) { - renameFileToDest(); + // rename the file. The destination path will be different + // from that passed in if the destination is a directory; + // the final value is needed to completely delete parent markers + // when they are not being retained. + destCreated = renameFileToDest(); } else { recursiveDirectoryRename(); } @@ -254,15 +331,17 @@ public Long execute() throws IOException { // Tell the metastore this fact and let it complete its changes renameTracker.completeRename(); - callbacks.finishRename(sourcePath, destPath); + callbacks.finishRename(sourcePath, destCreated); return bytesCopied.get(); } /** - * The source is a file: rename it to the destination. + * The source is a file: rename it to the destination, which + * will be under the current destination path if that is a directory. + * @return the path of the object created. * @throws IOException failure */ - protected void renameFileToDest() throws IOException { + protected Path renameFileToDest() throws IOException { final StoreContext storeContext = getStoreContext(); // the source is a file. Path copyDestinationPath = destPath; @@ -295,12 +374,14 @@ protected void renameFileToDest() throws IOException { callbacks.deleteObjectAtPath(sourcePath, sourceKey, true, null); // and update the tracker renameTracker.sourceObjectsDeleted(Lists.newArrayList(sourcePath)); + return copyDestinationPath; } /** * Execute a full recursive rename. - * The source is a file: rename it to the destination. - * @throws IOException failure + * There is a special handling of directly markers here -only leaf markers + * are copied. This reduces incompatibility "regions" across versions. +Are * @throws IOException failure */ protected void recursiveDirectoryRename() throws IOException { final StoreContext storeContext = getStoreContext(); @@ -325,18 +406,28 @@ protected void recursiveDirectoryRename() throws IOException { // marker. LOG.debug("Deleting fake directory marker at destination {}", destStatus.getPath()); + // Although the dir marker policy doesn't always need to do this, + // it's simplest just to be consistent here. + // note: updates the metastore as well a S3. callbacks.deleteObjectAtPath(destStatus.getPath(), dstKey, false, null); } Path parentPath = storeContext.keyToPath(srcKey); + + // Track directory markers so that we know which leaf directories need to be + // recreated + DirMarkerTracker dirMarkerTracker = new DirMarkerTracker(parentPath, + false); + final RemoteIterator iterator = - callbacks.listFilesAndEmptyDirectories(parentPath, + callbacks.listFilesAndDirectoryMarkers(parentPath, sourceStatus, true, true); while (iterator.hasNext()) { // get the next entry in the listing. S3ALocatedFileStatus child = iterator.next(); + LOG.debug("To rename {}", child); // convert it to an S3 key. 
String k = storeContext.pathToKey(child.getPath()); // possibly adding a "/" if it represents directory and it does @@ -347,36 +438,45 @@ protected void recursiveDirectoryRename() throws IOException { // the source object to copy as a path. Path childSourcePath = storeContext.keyToPath(key); - // mark for deletion on a successful copy. - queueToDelete(childSourcePath, key); - - // the destination key is that of the key under the source tree, - // remapped under the new destination path. - String newDestKey = - dstKey + key.substring(srcKey.length()); - Path childDestPath = storeContext.keyToPath(newDestKey); - - // now begin the single copy - CompletableFuture copy = initiateCopy(child, key, - childSourcePath, newDestKey, childDestPath); - activeCopies.add(copy); - bytesCopied.addAndGet(sourceStatus.getLen()); + List markersToDelete; - if (activeCopies.size() == RENAME_PARALLEL_LIMIT) { - // the limit of active copies has been reached; - // wait for completion or errors to surface. - LOG.debug("Waiting for active copies to complete"); - completeActiveCopies("batch threshold reached"); - } - if (keysToDelete.size() == pageSize) { - // finish ongoing copies then delete all queued keys. - // provided the parallel limit is a factor of the max entry - // constant, this will not need to block for the copy, and - // simply jump straight to the delete. - completeActiveCopiesAndDeleteSources("paged delete"); + boolean isMarker = key.endsWith("/"); + if (isMarker) { + // add the marker to the tracker. + // it will not be deleted _yet_ but it may find a list of parent + // markers which may now be deleted. + markersToDelete = dirMarkerTracker.markerFound( + childSourcePath, key, child); + } else { + // it is a file. + // note that it has been found -this may find a list of parent + // markers which may now be deleted. + markersToDelete = dirMarkerTracker.fileFound( + childSourcePath, key, child); + // the destination key is that of the key under the source tree, + // remapped under the new destination path. + String newDestKey = + dstKey + key.substring(srcKey.length()); + Path childDestPath = storeContext.keyToPath(newDestKey); + + // mark the source file for deletion on a successful copy. + queueToDelete(childSourcePath, key); + // now begin the single copy + CompletableFuture copy = initiateCopy(child, key, + childSourcePath, newDestKey, childDestPath); + activeCopies.add(copy); + bytesCopied.addAndGet(sourceStatus.getLen()); } + // add any markers to delete to the operation so they get cleaned + // incrementally + queueToDelete(markersToDelete); + // and trigger any end of loop operations + endOfLoopActions(); } // end of iteration through the list + // finally process remaining directory markers + copyEmptyDirectoryMarkers(srcKey, dstKey, dirMarkerTracker); + // await the final set of copies and their deletion // This will notify the renameTracker that these objects // have been deleted. @@ -387,6 +487,93 @@ protected void recursiveDirectoryRename() throws IOException { renameTracker.moveSourceDirectory(); } + /** + * Operations to perform at the end of every loop iteration. + *

    + * This may block the thread waiting for copies to complete + * and/or delete a page of data. + */ + private void endOfLoopActions() throws IOException { + if (keysToDelete.size() == pageSize) { + // finish ongoing copies then delete all queued keys. + completeActiveCopiesAndDeleteSources("paged delete"); + } else { + if (activeCopies.size() == RENAME_PARALLEL_LIMIT) { + // the limit of active copies has been reached; + // wait for completion or errors to surface. + LOG.debug("Waiting for active copies to complete"); + completeActiveCopies("batch threshold reached"); + } + } + } + + /** + * Process all directory markers at the end of the rename. + * All leaf markers are queued to be copied in the store; + * this updates the metastore tracker as it does so. + *

    + * Why not simply create new markers? All the metadata + * gets copied too, so if there was anything relevant then + * it would be preserved. + *

+ * At the same time: markers aren't valued much and may + * be deleted without any safety checks -so if there was relevant + * data it is at risk of destruction at any point. + * If there are lots of empty directory rename operations taking place, + * the decision to copy the source may need revisiting. + * Be advised though: the costs of the copy notwithstanding, + * it is a lot easier to have one single type of scheduled copy operation + * than have copy and touch calls being scheduled. + *
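+ * As a rough illustration of the tracker's bookkeeping (the paths, keys
+ * and statuses below are invented for the example, not taken from this
+ * change), a source tree with one populated and one empty directory is
+ * split into surplus and leaf markers:
+ * <pre>{@code
+ *   DirMarkerTracker tracker = new DirMarkerTracker(sourceRoot, false);
+ *   // "src/dir1/" has child entries, so it becomes a surplus marker:
+ *   // deleted from the source but never copied.
+ *   tracker.markerFound(dir1Path, "src/dir1/", dir1Status);
+ *   tracker.fileFound(file1Path, "src/dir1/file1", file1Status);
+ *   // "src/dir1/dir2/" has no children, so it remains a leaf marker
+ *   // and is copied to the destination.
+ *   tracker.markerFound(dir2Path, "src/dir1/dir2/", dir2Status);
+ *   Map<Path, DirMarkerTracker.Marker> leaves = tracker.getLeafMarkers();
+ *   Map<Path, DirMarkerTracker.Marker> surplus = tracker.getSurplusMarkers();
+ * }</pre>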

    + * The duration returned is the time to initiate all copy/delete operations, + * including any blocking waits for active copies and paged deletes + * to execute. There may still be outstanding operations + * queued by this method -the duration may be an underestimate + * of the time this operation actually takes. + * + * @param srcKey source key with trailing / + * @param dstKey dest key with trailing / + * @param dirMarkerTracker tracker of markers + * @return how long it took. + */ + private OperationDuration copyEmptyDirectoryMarkers( + final String srcKey, + final String dstKey, + final DirMarkerTracker dirMarkerTracker) throws IOException { + // directory marker work. + LOG.debug("Copying markers from {}", dirMarkerTracker); + final StoreContext storeContext = getStoreContext(); + Map leafMarkers = + dirMarkerTracker.getLeafMarkers(); + Map surplus = + dirMarkerTracker.getSurplusMarkers(); + // for all leaf markers: copy the original + DurationInfo duration = new DurationInfo(LOG, false, + "copying %d leaf markers with %d surplus not copied", + leafMarkers.size(), surplus.size()); + for (DirMarkerTracker.Marker entry: leafMarkers.values()) { + Path source = entry.getPath(); + String key = entry.getKey(); + String newDestKey = + dstKey + key.substring(srcKey.length()); + Path childDestPath = storeContext.keyToPath(newDestKey); + LOG.debug("copying dir marker from {} to {}", key, newDestKey); + + activeCopies.add( + initiateCopy( + entry.getStatus(), + key, + source, + newDestKey, + childDestPath)); + queueToDelete(entry); + // end of loop + endOfLoopActions(); + } + duration.close(); + return duration; + } + /** * Initiate a copy operation in the executor. * @param source status of the source object. @@ -487,6 +674,16 @@ private void removeSourceObjects( List undeletedObjects = new ArrayList<>(); try { // remove the keys + + // list what is being deleted for the interest of anyone + // who is trying to debug why objects are no longer there. + if (LOG.isDebugEnabled()) { + LOG.debug("Initiating delete operation for {} objects", keys.size()); + for (DeleteObjectsRequest.KeyVersion key : keys) { + LOG.debug(" {} {}", key.getKey(), + key.getVersion() != null ? key.getVersion() : ""); + } + } // this will update the metastore on a failure, but on // a successful operation leaves the store as is. callbacks.removeKeys( @@ -498,7 +695,7 @@ private void removeSourceObjects( // and clear the list. } catch (AmazonClientException | IOException e) { // Failed. - // Notify the rename operation. + // Notify the rename tracker. // removeKeys will have already purged the metastore of // all keys it has known to delete; this is just a final // bit of housekeeping and a chance to tune exception diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java new file mode 100644 index 0000000000000..5ff116fe38ec7 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java @@ -0,0 +1,440 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CompletableFuture; + +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.BBPartHandle; +import org.apache.hadoop.fs.BBUploadHandle; +import org.apache.hadoop.fs.PartHandle; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathHandle; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.UploadHandle; +import org.apache.hadoop.fs.impl.AbstractMultipartUploader; +import org.apache.hadoop.fs.s3a.WriteOperations; +import org.apache.hadoop.fs.s3a.statistics.S3AMultipartUploaderStatistics; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.statistics.IOStatistics; + +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; + +/** + * MultipartUploader for S3AFileSystem. This uses the S3 multipart + * upload mechanism. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +class S3AMultipartUploader extends AbstractMultipartUploader { + + private final S3AMultipartUploaderBuilder builder; + + /** Header for serialized Parts: {@value}. */ + + public static final String HEADER = "S3A-part01"; + + private final WriteOperations writeOperations; + + private final StoreContext context; + + private final S3AMultipartUploaderStatistics statistics; + + /** + * Bulk state; demand created and then retained. + */ + private BulkOperationState operationState; + + /** + * Was an operation state requested but not returned? + */ + private boolean noOperationState; + + /** + * Instatiate; this is called by the builder. 
+ * @param builder builder + * @param writeOperations writeOperations + * @param context s3a context + * @param statistics statistics callbacks + */ + S3AMultipartUploader( + final S3AMultipartUploaderBuilder builder, + final WriteOperations writeOperations, + final StoreContext context, + final S3AMultipartUploaderStatistics statistics) { + super(context.makeQualified(builder.getPath())); + this.builder = builder; + this.writeOperations = writeOperations; + this.context = context; + this.statistics = Objects.requireNonNull(statistics); + } + + @Override + public void close() throws IOException { + if (operationState != null) { + operationState.close(); + } + super.close(); + } + + @Override + public IOStatistics getIOStatistics() { + return statistics.getIOStatistics(); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "S3AMultipartUploader{"); + sb.append("base=").append(getBasePath()); + sb.append("; statistics=").append( + ioStatisticsToString(statistics.getIOStatistics())); + sb.append('}'); + return sb.toString(); + } + + /** + * Retrieve the operation state; create one on demand if needed + * and there has been no unsuccessful attempt to create one. + * @return an active operation state. + * @throws IOException failure + */ + private synchronized BulkOperationState retrieveOperationState() + throws IOException { + if (operationState == null && !noOperationState) { + operationState = writeOperations.initiateOperation(getBasePath(), + BulkOperationState.OperationType.Upload); + noOperationState = operationState != null; + } + return operationState; + } + + @Override + public CompletableFuture startUpload( + final Path filePath) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPath(dest); + String key = context.pathToKey(dest); + return context.submit(new CompletableFuture<>(), + () -> { + String uploadId = writeOperations.initiateMultiPartUpload(key); + statistics.uploadStarted(); + return BBUploadHandle.from(ByteBuffer.wrap( + uploadId.getBytes(Charsets.UTF_8))); + }); + } + + @Override + public CompletableFuture putPart( + final UploadHandle uploadId, + final int partNumber, + final Path filePath, + final InputStream inputStream, + final long lengthInBytes) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPutArguments(dest, inputStream, partNumber, uploadId, + lengthInBytes); + byte[] uploadIdBytes = uploadId.toByteArray(); + checkUploadId(uploadIdBytes); + String key = context.pathToKey(dest); + String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + return context.submit(new CompletableFuture<>(), + () -> { + UploadPartRequest request = writeOperations.newUploadPartRequest(key, + uploadIdString, partNumber, (int) lengthInBytes, inputStream, + null, 0L); + UploadPartResult result = writeOperations.uploadPart(request); + statistics.partPut(lengthInBytes); + String eTag = result.getETag(); + return BBPartHandle.from( + ByteBuffer.wrap( + buildPartHandlePayload( + filePath.toUri().toString(), + uploadIdString, + result.getPartNumber(), + eTag, + lengthInBytes))); + }); + } + + @Override + public CompletableFuture complete( + final UploadHandle uploadHandle, + final Path filePath, + final Map handleMap) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPath(dest); + byte[] uploadIdBytes = uploadHandle.toByteArray(); + checkUploadId(uploadIdBytes); + checkPartHandles(handleMap); + List> handles = + new 
ArrayList<>(handleMap.entrySet()); + handles.sort(Comparator.comparingInt(Map.Entry::getKey)); + int count = handles.size(); + String key = context.pathToKey(dest); + + String uploadIdStr = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + ArrayList eTags = new ArrayList<>(); + eTags.ensureCapacity(handles.size()); + long totalLength = 0; + // built up to identify duplicates -if the size of this set is + // below that of the number of parts, then there's a duplicate entry. + Set ids = new HashSet<>(count); + + for (Map.Entry handle : handles) { + PartHandlePayload payload = parsePartHandlePayload( + handle.getValue().toByteArray()); + payload.validate(uploadIdStr, filePath); + ids.add(payload.getPartNumber()); + totalLength += payload.getLen(); + eTags.add(new PartETag(handle.getKey(), payload.getEtag())); + } + Preconditions.checkArgument(ids.size() == count, + "Duplicate PartHandles"); + + // retrieve/create operation state for scalability of completion. + final BulkOperationState state = retrieveOperationState(); + long finalLen = totalLength; + return context.submit(new CompletableFuture<>(), + () -> { + CompleteMultipartUploadResult result = + writeOperations.commitUpload( + key, + uploadIdStr, + eTags, + finalLen, + state); + + byte[] eTag = result.getETag().getBytes(Charsets.UTF_8); + statistics.uploadCompleted(); + return (PathHandle) () -> ByteBuffer.wrap(eTag); + }); + } + + @Override + public CompletableFuture abort( + final UploadHandle uploadId, + final Path filePath) + throws IOException { + Path dest = context.makeQualified(filePath); + checkPath(dest); + final byte[] uploadIdBytes = uploadId.toByteArray(); + checkUploadId(uploadIdBytes); + String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, + Charsets.UTF_8); + return context.submit(new CompletableFuture<>(), + () -> { + writeOperations.abortMultipartCommit( + context.pathToKey(dest), + uploadIdString); + statistics.uploadAborted(); + return null; + }); + } + + /** + * Upload all MPUs under the path. + * @param path path to abort uploads under. + * @return a future which eventually returns the number of entries found + * @throws IOException submission failure + */ + @Override + public CompletableFuture abortUploadsUnderPath(final Path path) + throws IOException { + statistics.abortUploadsUnderPathInvoked(); + return context.submit(new CompletableFuture<>(), + () -> + writeOperations.abortMultipartUploadsUnderPath( + context.pathToKey(path))); + } + + /** + * Build the payload for marshalling. + * + * @param partNumber part number from response + * @param etag upload etag + * @param len length + * @return a byte array to marshall. + * @throws IOException error writing the payload + */ + @VisibleForTesting + static byte[] buildPartHandlePayload( + final String path, + final String uploadId, + final int partNumber, + final String etag, + final long len) + throws IOException { + + return new PartHandlePayload(path, uploadId, partNumber, len, etag) + .toBytes(); + } + + /** + * Parse the payload marshalled as a part handle. 
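+ * A round-trip sketch of the payload format (the path, upload ID and
+ * etag values here are illustrative placeholders only):
+ * <pre>{@code
+ *   byte[] bytes = buildPartHandlePayload(
+ *       "s3a://bucket/dest", "upload-0001", 3, "etag-3", 1024L);
+ *   PartHandlePayload payload = parsePartHandlePayload(bytes);
+ *   // throws PathIOException if the upload ID or path do not match
+ *   payload.validate("upload-0001", new Path("s3a://bucket/dest"));
+ * }</pre>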
+ * @param data handle data + * @return the length and etag + * @throws IOException error reading the payload + */ + @VisibleForTesting + static PartHandlePayload parsePartHandlePayload( + final byte[] data) + throws IOException { + + try (DataInputStream input = + new DataInputStream(new ByteArrayInputStream(data))) { + final String header = input.readUTF(); + if (!HEADER.equals(header)) { + throw new IOException("Wrong header string: \"" + header + "\""); + } + final String path = input.readUTF(); + final String uploadId = input.readUTF(); + final int partNumber = input.readInt(); + final long len = input.readLong(); + final String etag = input.readUTF(); + if (len < 0) { + throw new IOException("Negative length"); + } + return new PartHandlePayload(path, uploadId, partNumber, len, etag); + } + } + + /** + * Payload of a part handle; serializes + * the fields using DataInputStream and DataOutputStream. + */ + @VisibleForTesting + static final class PartHandlePayload { + + private final String path; + + private final String uploadId; + + private final int partNumber; + + private final long len; + + private final String etag; + + private PartHandlePayload( + final String path, + final String uploadId, + final int partNumber, + final long len, + final String etag) { + Preconditions.checkArgument(StringUtils.isNotEmpty(etag), + "Empty etag"); + Preconditions.checkArgument(StringUtils.isNotEmpty(path), + "Empty path"); + Preconditions.checkArgument(StringUtils.isNotEmpty(uploadId), + "Empty uploadId"); + Preconditions.checkArgument(len >= 0, + "Invalid length"); + + this.path = path; + this.uploadId = uploadId; + this.partNumber = partNumber; + this.len = len; + this.etag = etag; + } + + public String getPath() { + return path; + } + + public int getPartNumber() { + return partNumber; + } + + public long getLen() { + return len; + } + + public String getEtag() { + return etag; + } + + public String getUploadId() { + return uploadId; + } + + public byte[] toBytes() + throws IOException { + Preconditions.checkArgument(StringUtils.isNotEmpty(etag), + "Empty etag"); + Preconditions.checkArgument(len >= 0, + "Invalid length"); + + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + try (DataOutputStream output = new DataOutputStream(bytes)) { + output.writeUTF(HEADER); + output.writeUTF(path); + output.writeUTF(uploadId); + output.writeInt(partNumber); + output.writeLong(len); + output.writeUTF(etag); + } + return bytes.toByteArray(); + } + + public void validate(String uploadIdStr, Path filePath) + throws PathIOException { + String destUri = filePath.toUri().toString(); + if (!destUri.equals(path)) { + throw new PathIOException(destUri, + "Multipart part path mismatch: " + path); + } + if (!uploadIdStr.equals(uploadId)) { + throw new PathIOException(destUri, + "Multipart part ID mismatch: " + uploadId); + } + } + } + + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java new file mode 100644 index 0000000000000..cb38b82a765cb --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploaderBuilder.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.impl.MultipartUploaderBuilderImpl; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.WriteOperations; +import org.apache.hadoop.fs.s3a.statistics.S3AMultipartUploaderStatistics; + +/** + * Builder for S3A multipart uploaders. + */ +public class S3AMultipartUploaderBuilder extends + MultipartUploaderBuilderImpl { + + private final WriteOperations writeOperations; + + private final StoreContext context; + + private final S3AMultipartUploaderStatistics statistics; + + public S3AMultipartUploaderBuilder( + @Nonnull final S3AFileSystem fileSystem, + @Nonnull final WriteOperations writeOperations, + @Nonnull final StoreContext context, + @Nonnull final Path p, + @Nonnull final S3AMultipartUploaderStatistics statistics) { + super(fileSystem, p); + this.writeOperations = writeOperations; + this.context = context; + this.statistics = statistics; + } + + @Override + public S3AMultipartUploaderBuilder getThisBuilder() { + return this; + } + + @Override + public S3AMultipartUploader build() + throws IllegalArgumentException, IOException { + return new S3AMultipartUploader(this, writeOperations, context, statistics); + } + + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StatusProbeEnum.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StatusProbeEnum.java index f843b20ab28b0..3b69c7efe3741 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StatusProbeEnum.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StatusProbeEnum.java @@ -21,9 +21,12 @@ import java.util.EnumSet; import java.util.Set; +import org.apache.hadoop.classification.InterfaceAudience; + /** * Enum of probes which can be made of S3. */ +@InterfaceAudience.Private public enum StatusProbeEnum { /** The actual path. */ @@ -33,28 +36,23 @@ public enum StatusProbeEnum { /** LIST under the path. */ List; - /** All probes. */ - public static final Set ALL = EnumSet.allOf( - StatusProbeEnum.class); - - /** Skip the HEAD and only look for directories. */ - public static final Set DIRECTORIES = - EnumSet.of(DirMarker, List); - - /** We only want the HEAD or dir marker. */ - public static final Set HEAD_OR_DIR_MARKER = - EnumSet.of(Head, DirMarker); + /** Look for files and directories. */ + public static final Set ALL = + EnumSet.of(Head, List); /** We only want the HEAD. */ public static final Set HEAD_ONLY = EnumSet.of(Head); - /** We only want the dir marker. */ - public static final Set DIR_MARKER_ONLY = - EnumSet.of(DirMarker); - - /** We only want the dir marker. */ + /** List operation only. */ public static final Set LIST_ONLY = EnumSet.of(List); + /** Look for files and directories. 
*/ + public static final Set FILE = + HEAD_ONLY; + + /** Skip the HEAD and only look for directories. */ + public static final Set DIRECTORIES = + LIST_ONLY; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java index 88480db753515..88231d8af9c04 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -21,9 +21,13 @@ import java.io.File; import java.io.IOException; import java.net.URI; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; -import com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -31,12 +35,13 @@ import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AInputPolicy; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3AStorageStatistics; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; /** @@ -49,9 +54,10 @@ * their own. * * Warning: this really is private and unstable. Do not use - * outside the org.apache.hadoop.fs.s3a package. + * outside the org.apache.hadoop.fs.s3a package, or in extension points + * such as DelegationTokens. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate("S3A Filesystem and extensions") @InterfaceStability.Unstable public class StoreContext { @@ -84,7 +90,8 @@ public class StoreContext { private final Invoker invoker; /** Instrumentation and statistics. */ - private final S3AInstrumentation instrumentation; + private final S3AStatisticsContext instrumentation; + private final S3AStorageStatistics storageStatistics; /** Seek policy. */ @@ -114,8 +121,7 @@ public class StoreContext { /** * Instantiate. - * No attempt to use a builder here as outside tests - * this should only be created in the S3AFileSystem. + * @deprecated as public method: use {@link StoreContextBuilder}. 
*/ public StoreContext( final URI fsURI, @@ -123,10 +129,10 @@ public StoreContext( final Configuration configuration, final String username, final UserGroupInformation owner, - final ListeningExecutorService executor, + final ExecutorService executor, final int executorCapacity, final Invoker invoker, - final S3AInstrumentation instrumentation, + final S3AStatisticsContext instrumentation, final S3AStorageStatistics storageStatistics, final S3AInputPolicy inputPolicy, final ChangeDetectionPolicy changeDetectionPolicy, @@ -140,7 +146,7 @@ public StoreContext( this.configuration = configuration; this.username = username; this.owner = owner; - this.executor = executor; + this.executor = MoreExecutors.listeningDecorator(executor); this.executorCapacity = executorCapacity; this.invoker = invoker; this.instrumentation = instrumentation; @@ -175,7 +181,7 @@ public String getUsername() { return username; } - public ListeningExecutorService getExecutor() { + public ExecutorService getExecutor() { return executor; } @@ -183,7 +189,12 @@ public Invoker getInvoker() { return invoker; } - public S3AInstrumentation getInstrumentation() { + /** + * Get the statistics context for this StoreContext. + * @return the statistics context this store context was created + * with. + */ + public S3AStatisticsContext getInstrumentation() { return instrumentation; } @@ -207,6 +218,10 @@ public boolean isUseListV1() { return useListV1; } + public ContextAccessors getContextAccessors() { + return contextAccessors; + } + /** * Convert a key to a fully qualified path. * @param key input key @@ -226,6 +241,16 @@ public String pathToKey(Path path) { return contextAccessors.pathToKey(path); } + /** + * Qualify a path. + * + * @param path path to qualify/normalize + * @return possibly new path. + */ + public Path makeQualified(Path path) { + return contextAccessors.makeQualified(path); + } + /** * Get the storage statistics of this filesystem. * @return the storage statistics @@ -251,7 +276,6 @@ public void incrementStatistic(Statistic statistic) { */ public void incrementStatistic(Statistic statistic, long count) { instrumentation.incrementCounter(statistic, count); - storageStatistics.incrementCounter(statistic, count); } /** @@ -288,7 +312,7 @@ public void incrementGauge(Statistic statistic, long count) { * @param capacity maximum capacity of this executor. * @return an executor for submitting work. */ - public ListeningExecutorService createThrottledExecutor(int capacity) { + public ExecutorService createThrottledExecutor(int capacity) { return new SemaphoredDelegatingExecutor(executor, capacity, true); } @@ -298,7 +322,7 @@ public ListeningExecutorService createThrottledExecutor(int capacity) { * {@link #executorCapacity}. * @return a new executor for exclusive use by the caller. */ - public ListeningExecutorService createThrottledExecutor() { + public ExecutorService createThrottledExecutor() { return createThrottledExecutor(executorCapacity); } @@ -351,4 +375,20 @@ public String fullKey(final S3AFileStatus stat) { ? k + "/" : k; } + + /** + * Submit a closure for execution in the executor + * returned by {@link #getExecutor()}. + * @param type of future + * @param future future for the result. + * @param call callable to invoke. 
+ * @return the future passed in + */ + public CompletableFuture submit( + final CompletableFuture future, + final Callable call) { + getExecutor().submit(() -> + LambdaUtils.eval(future, call)); + return future; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java new file mode 100644 index 0000000000000..13953f9c985f3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.net.URI; +import java.util.concurrent.ExecutorService; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.s3a.Invoker; +import org.apache.hadoop.fs.s3a.S3AInputPolicy; +import org.apache.hadoop.fs.s3a.S3AStorageStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Builder for the store context. 
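+ * A minimal usage sketch; the values passed in below are placeholders and
+ * any remaining fields are set the same way before {@code build()}:
+ * <pre>{@code
+ *   StoreContext context = new StoreContextBuilder()
+ *       .setFsURI(fsUri)
+ *       .setBucket("example-bucket")
+ *       .setConfiguration(conf)
+ *       .setUsername(username)
+ *       .setExecutor(boundedThreadPool)
+ *       .setInvoker(invoker)
+ *       .setMetadataStore(metadataStore)
+ *       .setContextAccessors(contextAccessors)
+ *       .build();
+ * }</pre>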
+ */ +public class StoreContextBuilder { + + private URI fsURI; + + private String bucket; + + private Configuration configuration; + + private String username; + + private UserGroupInformation owner; + + private ExecutorService executor; + + private int executorCapacity; + + private Invoker invoker; + + private S3AStatisticsContext instrumentation; + + private S3AStorageStatistics storageStatistics; + + private S3AInputPolicy inputPolicy = S3AInputPolicy.Normal; + + private ChangeDetectionPolicy changeDetectionPolicy; + + private boolean multiObjectDeleteEnabled = true; + + private MetadataStore metadataStore; + + private boolean useListV1 = false; + + private ContextAccessors contextAccessors; + + private ITtlTimeProvider timeProvider; + + public StoreContextBuilder setFsURI(final URI fsURI) { + this.fsURI = fsURI; + return this; + } + + public StoreContextBuilder setBucket(final String b) { + this.bucket = b; + return this; + } + + public StoreContextBuilder setConfiguration(final Configuration conf) { + this.configuration = conf; + return this; + } + + public StoreContextBuilder setUsername(final String user) { + this.username = user; + return this; + } + + public StoreContextBuilder setOwner(final UserGroupInformation ugi) { + this.owner = ugi; + return this; + } + + public StoreContextBuilder setExecutor( + final ExecutorService ex) { + this.executor = ex; + return this; + } + + public StoreContextBuilder setExecutorCapacity( + final int capacity) { + this.executorCapacity = capacity; + return this; + } + + public StoreContextBuilder setInvoker(final Invoker invoke) { + this.invoker = invoke; + return this; + } + + public StoreContextBuilder setInstrumentation( + final S3AStatisticsContext instr) { + this.instrumentation = instr; + return this; + } + + public StoreContextBuilder setStorageStatistics( + final S3AStorageStatistics sstats) { + this.storageStatistics = sstats; + return this; + } + + public StoreContextBuilder setInputPolicy( + final S3AInputPolicy policy) { + this.inputPolicy = policy; + return this; + } + + public StoreContextBuilder setChangeDetectionPolicy( + final ChangeDetectionPolicy policy) { + this.changeDetectionPolicy = policy; + return this; + } + + public StoreContextBuilder setMultiObjectDeleteEnabled( + final boolean enabled) { + this.multiObjectDeleteEnabled = enabled; + return this; + } + + public StoreContextBuilder setMetadataStore( + final MetadataStore store) { + this.metadataStore = store; + return this; + } + + public StoreContextBuilder setUseListV1( + final boolean useV1) { + this.useListV1 = useV1; + return this; + } + + public StoreContextBuilder setContextAccessors( + final ContextAccessors accessors) { + this.contextAccessors = accessors; + return this; + } + + public StoreContextBuilder setTimeProvider( + final ITtlTimeProvider provider) { + this.timeProvider = provider; + return this; + } + + @SuppressWarnings("deprecation") + public StoreContext build() { + return new StoreContext(fsURI, + bucket, + configuration, + username, + owner, + executor, + executorCapacity, + invoker, + instrumentation, + storageStatistics, + inputPolicy, + changeDetectionPolicy, + multiObjectDeleteEnabled, + metadataStore, + useListV1, + contextAccessors, + timeProvider); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/AuthoritativeAuditOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/AuthoritativeAuditOperation.java index 7bfbd9a3c9baa..111dd46fedd80 100644 --- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/AuthoritativeAuditOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/AuthoritativeAuditOperation.java @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.Queue; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java index fcb3dce4d0b45..b4974b7356514 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/BulkOperationState.java @@ -102,5 +102,9 @@ public enum OperationType { * Mkdir operation. */ Mkdir, + /** + * Multipart upload operation. + */ + Upload } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DescendantsIterator.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DescendantsIterator.java index 88a46745b11bf..a69109c9c88bb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DescendantsIterator.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DescendantsIterator.java @@ -24,7 +24,7 @@ import java.util.NoSuchElementException; import java.util.Queue; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java index 213ffdc983718..63600b3a94202 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java @@ -25,11 +25,12 @@ import java.util.Collections; import java.util.HashSet; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -118,6 +119,10 @@ public Collection getListing() { return Collections.unmodifiableCollection(listMap.values()); } + /** + * List all tombstones. + * @return all tombstones in the listing. + */ public Set listTombstones() { Set tombstones = new HashSet<>(); for (PathMetadata meta : listMap.values()) { @@ -128,6 +133,12 @@ public Set listTombstones() { return tombstones; } + /** + * Get the directory listing excluding tombstones. + * Returns a new DirListingMetadata instances, without the tombstones -the + * lastUpdated field is copied from this instance. + * @return a new DirListingMetadata without the tombstones. 
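+ * For example (assuming {@code metadataStore} is an initialized store):
+ * <pre>{@code
+ *   DirListingMetadata listing = metadataStore.listChildren(path);
+ *   DirListingMetadata visible = listing.withoutTombstones();
+ * }</pre>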
+ */ public DirListingMetadata withoutTombstones() { Collection filteredList = new ArrayList<>(); for (PathMetadata meta : listMap.values()) { @@ -143,6 +154,7 @@ public DirListingMetadata withoutTombstones() { * @return number of entries tracked. This is not the same as the number * of entries in the actual directory unless {@link #isAuthoritative()} is * true. + * It will also include any tombstones. */ public int numEntries() { return listMap.size(); @@ -251,19 +263,24 @@ public String toString() { * Remove expired entries from the listing based on TTL. * @param ttl the ttl time * @param now the current time + * @return the expired values. */ - public synchronized void removeExpiredEntriesFromListing(long ttl, - long now) { + public synchronized List removeExpiredEntriesFromListing( + long ttl, long now) { + List expired = new ArrayList<>(); final Iterator> iterator = listMap.entrySet().iterator(); while (iterator.hasNext()) { final Map.Entry entry = iterator.next(); // we filter iff the lastupdated is not 0 and the entry is expired - if (entry.getValue().getLastUpdated() != 0 - && (entry.getValue().getLastUpdated() + ttl) <= now) { + PathMetadata metadata = entry.getValue(); + if (metadata.getLastUpdated() != 0 + && (metadata.getLastUpdated() + ttl) <= now) { + expired.add(metadata); iterator.remove(); } } + return expired; } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java index 536481ac23b7e..20bd250da0c38 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DumpS3GuardDynamoTable.java @@ -35,7 +35,7 @@ import java.util.List; import com.amazonaws.services.dynamodbv2.xspec.ExpressionSpecBuilder; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,7 +59,7 @@ import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.ExitUtil; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.S3AUtils.ACCEPT_ALL; /** @@ -348,8 +348,8 @@ protected long dumpRawS3ObjectStore( final CsvFile csv) throws IOException { S3AFileSystem fs = getFilesystem(); Path rootPath = fs.qualify(new Path("/")); - Listing listing = new Listing(fs); - S3ListRequest request = fs.createListObjectsRequest("", null); + Listing listing = fs.getListing(); + S3ListRequest request = listing.createListObjectsRequest("", null); long count = 0; RemoteIterator st = listing.createFileStatusListingIterator(rootPath, request, diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java index b6ff4d982ddce..e4275ccb51c4c 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java @@ -25,7 +25,7 @@ import com.amazonaws.regions.Regions; import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; 
-import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java index 38b38fb7f93e7..b963e7e2532e5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java @@ -21,6 +21,7 @@ import javax.annotation.Nullable; import java.io.IOException; import java.io.InterruptedIOException; +import java.io.UncheckedIOException; import java.net.URI; import java.nio.file.AccessDeniedException; import java.util.ArrayList; @@ -62,10 +63,11 @@ import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; import com.amazonaws.services.dynamodbv2.model.TableDescription; import com.amazonaws.services.dynamodbv2.model.WriteRequest; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,8 +81,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.RemoteIterator; -import org.apache.hadoop.fs.impl.FunctionsRaisingIOE; -import org.apache.hadoop.fs.impl.WrappedIOException; +import org.apache.hadoop.util.functional.CallableRaisingIOE; +import org.apache.hadoop.util.functional.RemoteIterators; import org.apache.hadoop.fs.s3a.AWSCredentialProviderList; import org.apache.hadoop.fs.s3a.AWSServiceThrottledException; import org.apache.hadoop.fs.s3a.Constants; @@ -450,9 +452,11 @@ void bindToOwnerFilesystem(final S3AFileSystem fs) { owner = fs; conf = owner.getConf(); StoreContext context = owner.createStoreContext(); - instrumentation = context.getInstrumentation().getS3GuardInstrumentation(); + instrumentation = context.getInstrumentation() + .getS3GuardInstrumentation(); username = context.getUsername(); - executor = context.createThrottledExecutor(); + executor = MoreExecutors.listeningDecorator( + context.createThrottledExecutor()); ttlTimeProvider = Preconditions.checkNotNull( context.getTimeProvider(), "ttlTimeProvider must not be null"); @@ -507,13 +511,14 @@ public void initialize(Configuration config, // the executor capacity for work. 
int executorCapacity = intOption(conf, EXECUTOR_CAPACITY, DEFAULT_EXECUTOR_CAPACITY, 1); - executor = BlockingThreadPoolExecutorService.newInstance( - executorCapacity, - executorCapacity * 2, - longOption(conf, KEEPALIVE_TIME, - DEFAULT_KEEPALIVE_TIME, 0), - TimeUnit.SECONDS, - "s3a-ddb-" + tableName); + executor = MoreExecutors.listeningDecorator( + BlockingThreadPoolExecutorService.newInstance( + executorCapacity, + executorCapacity * 2, + longOption(conf, KEEPALIVE_TIME, + DEFAULT_KEEPALIVE_TIME, 0), + TimeUnit.SECONDS, + "s3a-ddb-" + tableName)); initDataAccessRetries(conf); this.ttlTimeProvider = ttlTp; @@ -638,8 +643,9 @@ public void deleteSubtree(Path path, LOG.debug("Subtree path {} is deleted; this will be a no-op", path); return; } - deleteEntries(new InternalIterators.PathFromRemoteStatusIterator( - new DescendantsIterator(this, meta)), + deleteEntries(RemoteIterators.mappingRemoteIterator( + new DescendantsIterator(this, meta), + FileStatus::getPath), operationState); } @@ -648,8 +654,7 @@ public void deleteSubtree(Path path, public void deletePaths(Collection paths, final BulkOperationState operationState) throws IOException { - deleteEntries( - new InternalIterators.RemoteIteratorFromIterator<>(paths.iterator()), + deleteEntries(RemoteIterators.remoteIteratorFromIterable(paths), operationState); } @@ -717,7 +722,7 @@ public DDBPathMetadata get(Path path) throws IOException { public DDBPathMetadata get(Path path, boolean wantEmptyDirectoryFlag) throws IOException { checkPath(path); - LOG.debug("Get from table {} in region {}: {}. wantEmptyDirectory={}", + LOG.debug("Get from table {} in region {}: {} ; wantEmptyDirectory={}", tableName, region, path, wantEmptyDirectoryFlag); DDBPathMetadata result = innerGet(path, wantEmptyDirectoryFlag); LOG.debug("result of get {} is: {}", path, result); @@ -826,7 +831,7 @@ public DirListingMetadata listChildren(final Path path) throws IOException { for (Item item : wrapWithRetries(items)) { metas.add(itemToPathMetadata(item, username)); } - } catch (WrappedIOException e) { + } catch (UncheckedIOException e) { // failure in the iterators; unwrap. throw e.getCause(); } @@ -912,17 +917,27 @@ private Collection completeAncestry( DDBPathMetadata oldEntry = ancestorState.put(path, entry); boolean addAncestors = true; if (oldEntry != null) { - if (!oldEntry.getFileStatus().isDirectory() - || !entry.getFileStatus().isDirectory()) { - // check for and warn if the existing bulk operation overwrote it. - // this should never occur outside tests explicitly creating it + // check for and warn if the existing bulk operation has an inconsistent + // entry. + // two directories or two files are both allowed. + // file-over-file can happen in multipart uploaders when the same + // uploader is overwriting file entries to the same destination as + // part of its bulk operation. + boolean oldWasDir = oldEntry.getFileStatus().isDirectory(); + boolean newIsDir = entry.getFileStatus().isDirectory(); + if ((oldWasDir && !newIsDir) + || (!oldWasDir && newIsDir)) { LOG.warn("Overwriting a S3Guard file created in the operation: {}", oldEntry); LOG.warn("With new entry: {}", entry); // restore the old state ancestorState.put(path, oldEntry); // then raise an exception - throw new PathIOException(path.toString(), E_INCONSISTENT_UPDATE); + throw new PathIOException(path.toString(), + String.format("%s old %s new %s", + E_INCONSISTENT_UPDATE, + oldEntry, + entry)); } else { // a directory is already present. Log and continue. 
LOG.debug("Directory at {} being updated with value {}", @@ -1624,7 +1639,7 @@ private int innerPrune( Set clearedParentPathSet = new HashSet<>(); // declare the operation to delete a batch as a function so // as to keep the code consistent across multiple uses. - FunctionsRaisingIOE.CallableRaisingIOE deleteBatchOperation = + CallableRaisingIOE deleteBatchOperation = () -> { // lowest path entries get deleted first. deletionBatch.sort(PathOrderComparators.TOPMOST_PATH_LAST); diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStoreTableManager.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStoreTableManager.java index 6383f001a3c0f..d04ea3ebdc757 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStoreTableManager.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStoreTableManager.java @@ -51,8 +51,8 @@ import com.amazonaws.services.dynamodbv2.model.Tag; import com.amazonaws.services.dynamodbv2.model.TagResourceRequest; import com.amazonaws.waiters.WaiterTimedOutException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java index daee6211b41d9..aa7fc4721b483 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ITtlTimeProvider.java @@ -29,6 +29,19 @@ * Time is measured in milliseconds, */ public interface ITtlTimeProvider { + + /** + * The current time in milliseconds. + * Assuming this calls System.currentTimeMillis(), this is a native iO call + * and so should be invoked sparingly (i.e. evaluate before any loop, rather + * than inside). + * @return the current time. + */ long getNow(); + + /** + * The TTL of the metadata. + * @return time in millis after which metadata is considered out of date. 
+ */ long getMetadataTtl(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ImportOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ImportOperation.java index 9418d6c7543ac..9c8b702228608 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ImportOperation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ImportOperation.java @@ -23,7 +23,7 @@ import java.util.HashSet; import java.util.Set; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/InternalIterators.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/InternalIterators.java deleted file mode 100644 index dd6fb5ff11544..0000000000000 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/InternalIterators.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.s3a.s3guard; - -import java.io.IOException; -import java.util.Iterator; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; -import org.apache.hadoop.fs.s3a.S3AFileStatus; - -/** - * Internal iterators. - */ -final class InternalIterators { - - private InternalIterators() { - } - - /** - * From a remote status iterator, build a path iterator. - */ - static final class PathFromRemoteStatusIterator implements - RemoteIterator { - - private final RemoteIterator source; - - /** - * Construct. - * @param source source iterator. - */ - PathFromRemoteStatusIterator(final RemoteIterator source) { - this.source = source; - } - - @Override - public boolean hasNext() throws IOException { - return source.hasNext(); - } - - @Override - public Path next() throws IOException { - return source.next().getPath(); - } - } - - /** - * From a classic java.util.Iterator, build a Hadoop remote iterator. - * @param type of iterated value. - */ - static final class RemoteIteratorFromIterator implements - RemoteIterator { - - private final Iterator source; - - /** - * Construct. - * @param source source iterator. 
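As a usage illustration of the ITtlTimeProvider contract documented above, a minimal implementation could look like the sketch below. The class name is hypothetical and it is assumed to sit in the same org.apache.hadoop.fs.s3a.s3guard package; it is not part of this change.

import java.util.concurrent.TimeUnit;

/** Minimal illustrative ITtlTimeProvider: wall-clock time plus a fixed TTL. */
class FixedTtlTimeProvider implements ITtlTimeProvider {

  private final long ttlMillis;

  FixedTtlTimeProvider(long ttl, TimeUnit unit) {
    this.ttlMillis = unit.toMillis(ttl);
  }

  @Override
  public long getNow() {
    // a native call, so evaluate once before a loop rather than per entry
    return System.currentTimeMillis();
  }

  @Override
  public long getMetadataTtl() {
    return ttlMillis;
  }
}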
- */ - RemoteIteratorFromIterator(final Iterator source) { - this.source = source; - } - - @Override - public boolean hasNext() { - return source.hasNext(); - } - - @Override - public T next() { - return source.next(); - } - } - -} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java index bd0b7200cb354..9d0ba9171abc2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java @@ -20,11 +20,11 @@ import javax.annotation.Nullable; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java index 2f71884b9a36c..94d8063537159 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java @@ -25,7 +25,7 @@ import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreListFilesIterator.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreListFilesIterator.java index 817cef67d3ed7..a92aaeb05604a 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreListFilesIterator.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreListFilesIterator.java @@ -27,7 +27,7 @@ import java.util.Queue; import java.util.Set; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java index 666c233575ad6..722f42176ef2f 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/NullMetadataStore.java @@ -172,8 +172,10 @@ private static final class NullRenameTracker extends RenameTracker { private NullRenameTracker( final StoreContext storeContext, final Path source, - final Path dest, MetadataStore 
metadataStore) { - super("null tracker", storeContext, metadataStore, source, dest, null); + final Path dest, + MetadataStore metadataStore) { + super("NullRenameTracker", storeContext, metadataStore, source, dest, + null); } @Override diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadata.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadata.java index 5f9b43f191006..09297056f6539 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadata.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadata.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a.s3guard; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java index 7389ec52c249c..413ecf1f422bb 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PathMetadataDynamoDBTranslation.java @@ -34,8 +34,8 @@ import com.amazonaws.services.dynamodbv2.model.KeySchemaElement; import com.amazonaws.services.dynamodbv2.model.KeyType; import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java index c7fb407c16dca..700b2ab71a6c8 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/ProgressiveRenameTracker.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.s3a.impl.StoreContext; import org.apache.hadoop.util.DurationInfo; -import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveAncestors; import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.addMoveDir; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PurgeS3GuardDynamoTable.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PurgeS3GuardDynamoTable.java index ad298c222adb2..746d34fda5c12 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PurgeS3GuardDynamoTable.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/PurgeS3GuardDynamoTable.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.ExitUtil; -import static 
com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.s3guard.DumpS3GuardDynamoTable.serviceMain; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.PARENT; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java index 19d4568d06db9..5c112fc1047d5 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RenameTracker.java @@ -33,7 +33,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.DurationInfo; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.S3AUtils.translateException; /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RetryingCollection.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RetryingCollection.java index 394f393c57ec5..108d205f74b05 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RetryingCollection.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/RetryingCollection.java @@ -19,9 +19,9 @@ package org.apache.hadoop.fs.s3a.s3guard; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Iterator; -import org.apache.hadoop.fs.impl.WrappedIOException; import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.Retries; @@ -88,7 +88,7 @@ private RetryingIterator(final Iterator iterator) { /** * {@inheritDoc}. - * @throws WrappedIOException for IO failure, including throttling. + * @throws UncheckedIOException for IO failure, including throttling. */ @Override @Retries.RetryTranslated @@ -100,13 +100,13 @@ public boolean hasNext() { true, iterator::hasNext); } catch (IOException e) { - throw new WrappedIOException(e); + throw new UncheckedIOException(e); } } /** * {@inheritDoc}. - * @throws WrappedIOException for IO failure, including throttling. + * @throws UncheckedIOException for IO failure, including throttling. 
*/ @Override @Retries.RetryTranslated @@ -118,7 +118,7 @@ public T next() { true, iterator::next); } catch (IOException e) { - throw new WrappedIOException(e); + throw new UncheckedIOException(e); } } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java index 877dc58612b65..2836887c71fb3 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java @@ -35,8 +35,8 @@ import javax.annotation.Nullable; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.s3a.S3AFileSystem; @@ -53,12 +53,14 @@ import org.apache.hadoop.fs.s3a.Retries.RetryTranslated; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ReflectionUtils; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_AUTHORITATIVE_PATH; import static org.apache.hadoop.fs.s3a.S3AUtils.createUploadFileStatus; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.authoritativeEmptyDirectoryMarker; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_BAD_CONFIGURATION; /** * Logic for integrating MetadataStore with S3A. @@ -157,6 +159,54 @@ static Class getMetadataStoreClass( } + /** + * We update the metastore for the specific case of S3 value == S3Guard value + * so as to place a more recent modtime in the store. + * because if not, we will continue to probe S3 whenever we look for this + * object, even we only do this if confident the S3 status is the same + * as the one in the store (i.e. it is not an older version) + * @param metadataStore MetadataStore to {@code put()} into. + * @param pm current data + * @param s3AFileStatus status to store + * @param timeProvider Time provider to use when writing entries + * @return true if the entry was updated. + * @throws IOException if metadata store update failed + */ + @RetryTranslated + public static boolean refreshEntry( + MetadataStore metadataStore, + PathMetadata pm, + S3AFileStatus s3AFileStatus, + ITtlTimeProvider timeProvider) throws IOException { + // the modtime of the data is the same as/older than the s3guard value + // either an old object has been found, or the existing one was retrieved + // in both cases -return s3guard value + S3AFileStatus msStatus = pm.getFileStatus(); + + // first check: size + boolean sizeMatch = msStatus.getLen() == s3AFileStatus.getLen(); + + // etags are expected on all objects, but handle the situation + // that a third party store doesn't serve them. + String s3Etag = s3AFileStatus.getETag(); + String pmEtag = msStatus.getETag(); + boolean etagsMatch = s3Etag != null && s3Etag.equals(pmEtag); + + // version ID: only in some stores, and will be missing in the metastore + // if the entry was created through a list operation. 
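The RetryingCollection change above tunnels checked IOExceptions through the java.util.Iterator contract with java.io.UncheckedIOException, and callers such as listChildren() unwrap them again via getCause(). A self-contained sketch of that pattern follows; RemoteSource and the class name are stand-ins, not APIs from this patch.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class UncheckedIOTunnelSketch {

  /** Stand-in for a RemoteIterator-style source whose methods throw IOException. */
  interface RemoteSource<T> {
    boolean hasNext() throws IOException;
    T next() throws IOException;
  }

  /** Adapt to java.util.Iterator by wrapping the checked exception. */
  static <T> Iterator<T> asIterator(RemoteSource<T> source) {
    return new Iterator<T>() {
      @Override
      public boolean hasNext() {
        try {
          return source.hasNext();
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      }

      @Override
      public T next() {
        try {
          return source.next();
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      }
    };
  }

  /** At the boundary, restore the original checked exception (cf. listChildren). */
  static <T> List<T> drain(RemoteSource<T> source) throws IOException {
    List<T> out = new ArrayList<>();
    try {
      Iterator<T> it = asIterator(source);
      while (it.hasNext()) {
        out.add(it.next());
      }
    } catch (UncheckedIOException e) {
      throw e.getCause();
    }
    return out;
  }
}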
+ String s3VersionId = s3AFileStatus.getVersionId(); + String pmVersionId = msStatus.getVersionId(); + boolean versionsMatchOrMissingInMetastore = + pmVersionId == null || pmVersionId.equals(s3VersionId); + if (sizeMatch && etagsMatch && versionsMatchOrMissingInMetastore) { + // update the store, return the new value + LOG.debug("Refreshing the metastore entry/timestamp"); + putAndReturn(metadataStore, s3AFileStatus, timeProvider); + return true; + } + return false; + } + /** * Helper function which puts a given S3AFileStatus into the MetadataStore and * returns the same S3AFileStatus. Instrumentation monitors the put operation. @@ -245,30 +295,6 @@ public static BulkOperationState initiateBulkWrite( } } - /** - * Convert the data of an iterator of {@link S3AFileStatus} to - * an array. Given tombstones are filtered out. If the iterator - * does return any item, an empty array is returned. - * @param iterator a non-null iterator - * @param tombstones - * @return a possibly-empty array of file status entries - * @throws IOException - */ - public static S3AFileStatus[] iteratorToStatuses( - RemoteIterator iterator, Set tombstones) - throws IOException { - List statuses = new ArrayList<>(); - - while (iterator.hasNext()) { - S3AFileStatus status = iterator.next(); - if (!tombstones.contains(status.getPath())) { - statuses.add(status); - } - } - - return statuses.toArray(new S3AFileStatus[0]); - } - /** * Convert the data of a directory listing to an array of {@link FileStatus} * entries. Tombstones are filtered out at this point. If the listing is null @@ -309,17 +335,22 @@ public static S3AFileStatus[] dirMetaToStatuses(DirListingMetadata dirMeta) { * @param dirMeta Directory listing from MetadataStore. May be null. * @param isAuthoritative State of authoritative mode * @param timeProvider Time provider to use when updating entries + * @param toStatusItr function to convert array of file status to + * RemoteIterator. * @return Final result of directory listing. 
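The decision made by refreshEntry() reduces to three checks: the lengths match, the etags match, and the version ID either matches or was never recorded in the metastore. The helper below restates that rule over plain values purely for illustration; it is a hypothetical method, not part of the patch.

/** Illustration of the matching rule refreshEntry() applies before updating the store. */
final class RefreshEntryRuleSketch {

  static boolean statusesMatch(long s3Length, String s3Etag, String s3VersionId,
      long storeLength, String storeEtag, String storeVersionId) {
    boolean sizeMatch = storeLength == s3Length;
    // etags are expected everywhere, but a third-party store may omit them
    boolean etagsMatch = s3Etag != null && s3Etag.equals(storeEtag);
    // the version ID may be absent from the store if the entry came from a listing
    boolean versionsMatchOrMissing =
        storeVersionId == null || storeVersionId.equals(s3VersionId);
    return sizeMatch && etagsMatch && versionsMatchOrMissing;
  }
}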
* @throws IOException if metadata store update failed */ - public static FileStatus[] dirListingUnion(MetadataStore ms, Path path, - List backingStatuses, DirListingMetadata dirMeta, - boolean isAuthoritative, ITtlTimeProvider timeProvider) - throws IOException { + public static RemoteIterator dirListingUnion( + MetadataStore ms, Path path, + RemoteIterator backingStatuses, + DirListingMetadata dirMeta, boolean isAuthoritative, + ITtlTimeProvider timeProvider, + Function> toStatusItr) + throws IOException { // Fast-path for NullMetadataStore if (isNullMetadataStore(ms)) { - return backingStatuses.toArray(new FileStatus[backingStatuses.size()]); + return backingStatuses; } assertQualified(path); @@ -360,7 +391,7 @@ public static FileStatus[] dirListingUnion(MetadataStore ms, Path path, } IOUtils.cleanupWithLogger(LOG, operationState); - return dirMetaToStatuses(dirMeta); + return toStatusItr.apply(dirMetaToStatuses(dirMeta)); } /** @@ -379,7 +410,7 @@ public static FileStatus[] dirListingUnion(MetadataStore ms, Path path, private static void authoritativeUnion( final MetadataStore ms, final Path path, - final List backingStatuses, + final RemoteIterator backingStatuses, final DirListingMetadata dirMeta, final ITtlTimeProvider timeProvider, final BulkOperationState operationState) throws IOException { @@ -390,7 +421,8 @@ private static void authoritativeUnion( Set deleted = dirMeta.listTombstones(); final Map dirMetaMap = dirMeta.getListing().stream() .collect(Collectors.toMap(pm -> pm.getFileStatus().getPath(), pm -> pm)); - for (S3AFileStatus s : backingStatuses) { + while (backingStatuses.hasNext()) { + S3AFileStatus s = backingStatuses.next(); final Path statusPath = s.getPath(); if (deleted.contains(statusPath)) { continue; @@ -443,16 +475,17 @@ private static void authoritativeUnion( private static void nonAuthoritativeUnion( final MetadataStore ms, final Path path, - final List backingStatuses, + final RemoteIterator backingStatuses, final DirListingMetadata dirMeta, final ITtlTimeProvider timeProvider, final BulkOperationState operationState) throws IOException { - List entriesToAdd = new ArrayList<>(backingStatuses.size()); + List entriesToAdd = new ArrayList<>(); Set deleted = dirMeta.listTombstones(); final Map dirMetaMap = dirMeta.getListing().stream() .collect(Collectors.toMap(pm -> pm.getFileStatus().getPath(), pm -> pm)); - for (S3AFileStatus s : backingStatuses) { + while (backingStatuses.hasNext()) { + S3AFileStatus s = backingStatuses.next(); final Path statusPath = s.getPath(); if (deleted.contains(statusPath)) { continue; @@ -925,8 +958,10 @@ public static PathMetadata getWithTtl(MetadataStore ms, Path path, if (!pathMetadata.isExpired(ttl, timeProvider.getNow())) { return pathMetadata; } else { - LOG.debug("PathMetadata TTl for {} is expired in metadata store.", - path); + LOG.debug("PathMetadata TTl for {} is expired in metadata store" + + " -removing entry", path); + // delete the tombstone + ms.forgetMetadata(path); return null; } } @@ -938,6 +973,8 @@ public static PathMetadata getWithTtl(MetadataStore ms, Path path, * List children; mark the result as non-auth if the TTL has expired. * If the allowAuthoritative flag is true, return without filtering or * checking for TTL expiry. + * If false: the expiry scan takes place and the + * TODO: should we always purge tombstones? Even in auth? * @param ms metastore * @param path path to look up. 
* @param timeProvider nullable time provider @@ -966,9 +1003,15 @@ public static DirListingMetadata listChildrenWithTtl(MetadataStore ms, // filter expired entries if (dlm != null) { - dlm.removeExpiredEntriesFromListing( + List expired = dlm.removeExpiredEntriesFromListing( timeProvider.getMetadataTtl(), timeProvider.getNow()); + // now purge the tombstones + for (PathMetadata metadata : expired) { + if (metadata.isDeleted()) { + ms.forgetMetadata(metadata.getFileStatus().getPath()); + } + } } return dlm; @@ -1041,12 +1084,22 @@ public static boolean allowAuthoritative(Path p, S3AFileSystem fs, return false; } + /** + * Format string to use when warning that S3Guard is disabled. + */ + @VisibleForTesting public static final String DISABLED_LOG_MSG = - "S3Guard is disabled on this bucket: {}"; + "S3Guard is disabled on this bucket: %s"; + /** + * Error string use in exception raised on an unknown log level. + */ public static final String UNKNOWN_WARN_LEVEL = - "Unknown S3Guard disabled warn level: "; + "Unknown " + S3GUARD_DISABLED_WARN_LEVEL + " value: "; + /** + * Warning levels to use when reporting S3Guard as disabled. + */ public enum DisabledWarnLevel { SILENT, INFORM, @@ -1054,9 +1107,18 @@ public enum DisabledWarnLevel { FAIL } + /** + * Log that S3Guard is disabled -optionally raise an exception. + * @param logger Log to log to + * @param warnLevelStr string value of warn action. + * @param bucket bucket to use in log/error messages + * @throws ExitUtil.ExitException if s3guard was disabled + * and the log level is "fail" + * @throws IllegalArgumentException unknown warning level. + */ public static void logS3GuardDisabled(Logger logger, String warnLevelStr, String bucket) - throws UnsupportedOperationException, IllegalArgumentException { + throws ExitUtil.ExitException, IllegalArgumentException { final DisabledWarnLevel warnLevel; try { warnLevel = DisabledWarnLevel.valueOf(warnLevelStr.toUpperCase(Locale.US)); @@ -1064,19 +1126,20 @@ public static void logS3GuardDisabled(Logger logger, String warnLevelStr, throw new IllegalArgumentException(UNKNOWN_WARN_LEVEL + warnLevelStr, e); } + String text = String.format(DISABLED_LOG_MSG, bucket); switch (warnLevel) { case SILENT: - logger.debug(DISABLED_LOG_MSG, bucket); + logger.debug(text); break; case INFORM: - logger.info(DISABLED_LOG_MSG, bucket); + logger.info(text); break; case WARN: - logger.warn(DISABLED_LOG_MSG, bucket); + logger.warn(text); break; case FAIL: - logger.error(DISABLED_LOG_MSG, bucket); - throw new UnsupportedOperationException(DISABLED_LOG_MSG + bucket); + logger.error(text); + throw new ExitUtil.ExitException(EXIT_BAD_CONFIGURATION, text); default: throw new IllegalArgumentException(UNKNOWN_WARN_LEVEL + warnLevelStr); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java index 1926d9ebd6242..112deca54f11e 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardFsck.java @@ -39,7 +39,7 @@ import com.amazonaws.services.dynamodbv2.document.internal.IteratorSupport; import com.amazonaws.services.dynamodbv2.document.spec.GetItemSpec; import com.amazonaws.services.dynamodbv2.xspec.ExpressionSpecBuilder; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTableAccess.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTableAccess.java index 7e8413b3dee7a..ca88dadeecf83 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTableAccess.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTableAccess.java @@ -39,7 +39,7 @@ import org.apache.hadoop.fs.s3a.Retries; import org.apache.hadoop.fs.s3a.S3AFileStatus; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.VERSION_MARKER_ITEM_NAME; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.CHILD; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.PARENT; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java index 6e89d0cd2dadb..2acae76875a22 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java @@ -30,26 +30,32 @@ import java.util.Date; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Scanner; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import com.amazonaws.services.s3.model.MultipartUpload; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.time.DurationFormatUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.s3a.MultipartUtils; import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AFileSystem; @@ -58,7 +64,10 @@ import org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens; import org.apache.hadoop.fs.s3a.commit.CommitConstants; import org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl; import org.apache.hadoop.fs.s3a.select.SelectTool; +import org.apache.hadoop.fs.s3a.tools.MarkerTool; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -79,7 +88,11 @@ /** * CLI to manage S3Guard Metadata Store. + *
    + * Some management tools invoke this class directly. */ +@InterfaceAudience.LimitedPrivate("management tools") +@InterfaceStability.Evolving public abstract class S3GuardTool extends Configured implements Tool, Closeable { private static final Logger LOG = LoggerFactory.getLogger(S3GuardTool.class); @@ -98,15 +111,17 @@ public abstract class S3GuardTool extends Configured implements Tool, "Commands: \n" + "\t" + Init.NAME + " - " + Init.PURPOSE + "\n" + "\t" + Destroy.NAME + " - " + Destroy.PURPOSE + "\n" + - "\t" + Import.NAME + " - " + Import.PURPOSE + "\n" + + "\t" + Authoritative.NAME + " - " + Authoritative.PURPOSE + "\n" + "\t" + BucketInfo.NAME + " - " + BucketInfo.PURPOSE + "\n" + - "\t" + Uploads.NAME + " - " + Uploads.PURPOSE + "\n" + "\t" + Diff.NAME + " - " + Diff.PURPOSE + "\n" + + "\t" + Fsck.NAME + " - " + Fsck.PURPOSE + "\n" + + "\t" + Import.NAME + " - " + Import.PURPOSE + "\n" + + "\t" + MarkerTool.MARKERS + " - " + MarkerTool.PURPOSE + "\n" + "\t" + Prune.NAME + " - " + Prune.PURPOSE + "\n" + "\t" + SetCapacity.NAME + " - " + SetCapacity.PURPOSE + "\n" + "\t" + SelectTool.NAME + " - " + SelectTool.PURPOSE + "\n" + - "\t" + Fsck.NAME + " - " + Fsck.PURPOSE + "\n" + - "\t" + Authoritative.NAME + " - " + Authoritative.PURPOSE + "\n"; + "\t" + Uploads.NAME + " - " + Uploads.PURPOSE + "\n"; + private static final String DATA_IN_S3_IS_PRESERVED = "(all data in S3 is preserved)"; @@ -116,6 +131,7 @@ public abstract class S3GuardTool extends Configured implements Tool, static final int SUCCESS = EXIT_SUCCESS; static final int INVALID_ARGUMENT = EXIT_COMMAND_ARGUMENT_ERROR; static final int E_USAGE = EXIT_USAGE; + static final int ERROR = EXIT_FAIL; static final int E_BAD_STATE = EXIT_NOT_ACCEPTABLE; static final int E_NOT_FOUND = EXIT_NOT_FOUND; @@ -472,6 +488,14 @@ protected void setStore(MetadataStore store) { this.store = store; } + /** + * Reset the store and filesystem bindings. + */ + protected void resetBindings() { + store = null; + filesystem = null; + } + protected CommandFormat getCommandFormat() { return commandFormat; } @@ -497,6 +521,30 @@ public final int run(String[] args) throws Exception { public abstract int run(String[] args, PrintStream out) throws Exception, ExitUtil.ExitException; + /** + * Dump the filesystem Storage Statistics if the FS is not null. + * Only non-zero statistics are printed. + * @param stream output stream + */ + protected void dumpFileSystemStatistics(PrintStream stream) { + FileSystem fs = getFilesystem(); + if (fs == null) { + return; + } + println(stream, "%nStorage Statistics for %s%n", fs.getUri()); + StorageStatistics st = fs.getStorageStatistics(); + Iterator it + = st.getLongStatistics(); + while (it.hasNext()) { + StorageStatistics.LongStatistic next = it.next(); + long value = next.getValue(); + if (value != 0) { + println(stream, "%s\t%s", next.getName(), value); + } + } + println(stream, ""); + } + /** * Create the metadata store. 
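dumpFileSystemStatistics() above relies only on the public StorageStatistics API, so the same non-zero dump can be reproduced outside the tool. The sketch below is illustrative; the class name and output format are assumptions.

import java.io.PrintStream;
import java.util.Iterator;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageStatistics;

/** Standalone sketch of the non-zero storage statistics dump shown above. */
final class StorageStatsDumpSketch {

  static void dump(FileSystem fs, PrintStream out) {
    out.printf("Storage Statistics for %s%n", fs.getUri());
    StorageStatistics stats = fs.getStorageStatistics();
    Iterator<StorageStatistics.LongStatistic> it = stats.getLongStatistics();
    while (it.hasNext()) {
      StorageStatistics.LongStatistic stat = it.next();
      if (stat.getValue() != 0) {
        // print only counters/gauges that have actually moved
        out.printf("%s\t%s%n", stat.getName(), stat.getValue());
      }
    }
  }
}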
*/ @@ -711,8 +759,8 @@ public int run(String[] args, PrintStream out) throws Exception { */ static class Destroy extends S3GuardTool { public static final String NAME = "destroy"; - public static final String PURPOSE = "destroy Metadata Store data " - + DATA_IN_S3_IS_PRESERVED; + public static final String PURPOSE = "destroy the Metadata Store including its" + + " contents" + DATA_IN_S3_IS_PRESERVED; private static final String USAGE = NAME + " [OPTIONS] [s3a://BUCKET]\n" + "\t" + PURPOSE + "\n\n" + "Common options:\n" + @@ -1167,16 +1215,20 @@ public int run(String[] args, PrintStream out) throws * Get info about a bucket and its S3Guard integration status. */ public static class BucketInfo extends S3GuardTool { - public static final String NAME = "bucket-info"; + public static final String BUCKET_INFO = "bucket-info"; + public static final String NAME = BUCKET_INFO; public static final String GUARDED_FLAG = "guarded"; public static final String UNGUARDED_FLAG = "unguarded"; public static final String AUTH_FLAG = "auth"; public static final String NONAUTH_FLAG = "nonauth"; public static final String ENCRYPTION_FLAG = "encryption"; public static final String MAGIC_FLAG = "magic"; + public static final String MARKERS_FLAG = "markers"; + public static final String MARKERS_AWARE = "aware"; public static final String PURPOSE = "provide/check S3Guard information" + " about a specific bucket"; + private static final String USAGE = NAME + " [OPTIONS] s3a://BUCKET\n" + "\t" + PURPOSE + "\n\n" + "Common options:\n" @@ -1186,7 +1238,9 @@ public static class BucketInfo extends S3GuardTool { + " -" + NONAUTH_FLAG + " - Require the S3Guard mode to be \"non-authoritative\"\n" + " -" + MAGIC_FLAG + " - Require the S3 filesystem to be support the \"magic\" committer\n" + " -" + ENCRYPTION_FLAG - + " -require {none, sse-s3, sse-kms} - Require encryption policy"; + + " (none, sse-s3, sse-kms) - Require encryption policy\n" + + " -" + MARKERS_FLAG + + " (aware, keep, delete, authoritative) - directory markers policy\n"; /** * Output when the client cannot get the location of a bucket. 
@@ -1196,10 +1250,17 @@ public static class BucketInfo extends S3GuardTool { "Location unknown -caller lacks " + RolePolicies.S3_GET_BUCKET_LOCATION + " permission"; + + @VisibleForTesting + public static final String IS_MARKER_AWARE = + "\tThe S3A connector is compatible with buckets where" + + " directory markers are not deleted"; + public BucketInfo(Configuration conf) { super(conf, GUARDED_FLAG, UNGUARDED_FLAG, AUTH_FLAG, NONAUTH_FLAG, MAGIC_FLAG); CommandFormat format = getCommandFormat(); format.addOptionWithValue(ENCRYPTION_FLAG); + format.addOptionWithValue(MARKERS_FLAG); } @Override @@ -1268,8 +1329,9 @@ public int run(String[] args, PrintStream out) authMode = conf.getBoolean(METADATASTORE_AUTHORITATIVE, false); final long ttl = conf.getTimeDuration(METADATASTORE_METADATA_TTL, DEFAULT_METADATASTORE_METADATA_TTL, TimeUnit.MILLISECONDS); - println(out, "\tMetadata time to live: %s=%s milliseconds", - METADATASTORE_METADATA_TTL, ttl); + println(out, "\tMetadata time to live: (set in %s) = %s", + METADATASTORE_METADATA_TTL, + DurationFormatUtils.formatDurationHMS(ttl)); printStoreDiagnostics(out, store); } else { println(out, "Filesystem %s is not using S3Guard", fsUri); @@ -1384,10 +1446,59 @@ public int run(String[] args, PrintStream out) fsUri, desiredEncryption, encryption); } + // directory markers + processMarkerOption(out, fs, + getCommandFormat().getOptValue(MARKERS_FLAG)); + + // and finally flush the output and report a success. out.flush(); return SUCCESS; } + /** + * Validate the marker options. + * @param out output stream + * @param fs filesystem + * @param path test path + * @param marker desired marker option -may be null. + */ + private void processMarkerOption(final PrintStream out, + final S3AFileSystem fs, + final String marker) { + println(out, "%nSecurity"); + DirectoryPolicy markerPolicy = fs.getDirectoryMarkerPolicy(); + String desc = markerPolicy.describe(); + println(out, "\tThe directory marker policy is \"%s\"", desc); + + String pols = DirectoryPolicyImpl.availablePolicies() + .stream() + .map(DirectoryPolicy.MarkerPolicy::getOptionName) + .collect(Collectors.joining(", ")); + println(out, "\tAvailable Policies: %s", pols); + printOption(out, "\tAuthoritative paths", + AUTHORITATIVE_PATH, ""); + DirectoryPolicy.MarkerPolicy mp = markerPolicy.getMarkerPolicy(); + + String desiredMarker = marker == null + ? 
"" + : marker.trim(); + final String optionName = mp.getOptionName(); + if (!desiredMarker.isEmpty()) { + if (MARKERS_AWARE.equalsIgnoreCase(desiredMarker)) { + // simple awareness test -provides a way to validate compatibility + // on the command line + println(out, IS_MARKER_AWARE); + } else { + // compare with current policy + if (!optionName.equalsIgnoreCase(desiredMarker)) { + throw badState("Bucket %s: required marker policy is \"%s\"" + + " but actual policy is \"%s\"", + fs.getUri(), desiredMarker, optionName); + } + } + } + } + private String printOption(PrintStream out, String description, String key, String defVal) { String t = getFilesystem().getConf().getTrimmed(key, defVal); @@ -1511,7 +1622,7 @@ private void processUploads(PrintStream out) throws IOException { if (mode == Mode.ABORT) { getFilesystem().getWriteOperationHelper() .abortMultipartUpload(upload.getKey(), upload.getUploadId(), - LOG_EVENT); + true, LOG_EVENT); } } if (mode != Mode.EXPECT || verbose) { @@ -1991,6 +2102,9 @@ public static int run(Configuration conf, String...args) throws case Diff.NAME: command = new Diff(conf); break; + case MarkerTool.MARKERS: + command = new MarkerTool(conf); + break; case Prune.NAME: command = new Prune(conf); break; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java index ff39b9ad958e6..20dc00fbc06a1 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectBinding.java @@ -30,7 +30,7 @@ import com.amazonaws.services.s3.model.QuoteFields; import com.amazonaws.services.s3.model.SSECustomerKey; import com.amazonaws.services.s3.model.SelectObjectContentRequest; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +44,7 @@ import org.apache.hadoop.fs.s3a.S3ObjectAttributes; import org.apache.hadoop.fs.s3a.WriteOperationHelper; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.hadoop.fs.s3a.select.SelectConstants.*; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectInputStream.java index f4bd8d11708ef..02d1e53c7ba81 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectInputStream.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectInputStream.java @@ -28,7 +28,7 @@ import com.amazonaws.services.s3.model.SelectObjectContentEventVisitor; import com.amazonaws.services.s3.model.SelectObjectContentResult; import com.amazonaws.services.s3.model.SelectRecordsInputStream; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,12 +39,12 @@ import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.Retries; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3AReadOpContext; import 
org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; import org.apache.hadoop.io.IOUtils; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.hadoop.fs.s3a.Invoker.once; import static org.apache.hadoop.fs.s3a.S3AInputStream.validateReadahead; @@ -103,7 +103,7 @@ public class SelectInputStream extends FSInputStream implements private final S3AReadOpContext readContext; - private final S3AInstrumentation.InputStreamStatistics streamStatistics; + private final S3AInputStreamStatistics streamStatistics; private long readahead; @@ -130,7 +130,7 @@ public SelectInputStream( this.uri = "s3a://" + this.bucket + "/" + this.key; this.readContext = readContext; this.readahead = readContext.getReadahead(); - this.streamStatistics = readContext.getInstrumentation() + this.streamStatistics = readContext.getS3AStatisticsContext() .newInputStreamStatistics(); SelectRecordsInputStream stream = once( "S3 Select", @@ -204,7 +204,7 @@ public synchronized long skip(final long n) throws IOException { long skipped = once("skip", uri, () -> wrappedStream.skip(n)); pos.addAndGet(skipped); // treat as a forward skip for stats - streamStatistics.seekForwards(skipped); + streamStatistics.seekForwards(skipped, skipped); return skipped; } @@ -331,7 +331,7 @@ public synchronized void seek(long newPos) throws IOException { bytesSkipped++; } // read has finished. - streamStatistics.seekForwards(bytesSkipped); + streamStatistics.seekForwards(bytesSkipped, bytesSkipped); } } @@ -428,7 +428,7 @@ private void incrementBytesRead(long bytesRead) { */ @InterfaceAudience.Private @InterfaceStability.Unstable - public S3AInstrumentation.InputStreamStatistics getS3AStreamStatistics() { + public S3AInputStreamStatistics getS3AStreamStatistics() { return streamStatistics; } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java new file mode 100644 index 0000000000000..b1cee718c206a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/BlockOutputStreamStatistics.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import java.io.Closeable; +import java.time.Duration; + +/** + * Block output stream statistics. + */ +public interface BlockOutputStreamStatistics extends Closeable, + S3AStatisticInterface { + + /** + * Block is queued for upload. 
+ * @param blockSize block size. + */ + void blockUploadQueued(int blockSize); + + /** + * Queued block has been scheduled for upload. + * @param timeInQueue time in the queue. + * @param blockSize block size. + */ + void blockUploadStarted(Duration timeInQueue, int blockSize); + + /** + * A block upload has completed. Duration excludes time in the queue. + * @param timeSinceUploadStarted time in since the transfer began. + * @param blockSize block size + */ + void blockUploadCompleted(Duration timeSinceUploadStarted, int blockSize); + + /** + * A block upload has failed. Duration excludes time in the queue. + *
    + * A final transfer completed event is still expected, so this + * does not decrement the active block counter. + *
    + * @param timeSinceUploadStarted time in since the transfer began. + * @param blockSize block size + */ + void blockUploadFailed(Duration timeSinceUploadStarted, int blockSize); + + /** + * Intermediate report of bytes uploaded. + * @param byteCount bytes uploaded + */ + void bytesTransferred(long byteCount); + + /** + * Note exception in a multipart complete. + * @param count count of exceptions + */ + void exceptionInMultipartComplete(int count); + + /** + * Note an exception in a multipart abort. + */ + void exceptionInMultipartAbort(); + + /** + * Get the number of bytes pending upload. + * @return the number of bytes in the pending upload state. + */ + long getBytesPendingUpload(); + + /** + * Data has been uploaded to be committed in a subsequent operation; + * to be called at the end of the write. + * @param size size in bytes + */ + void commitUploaded(long size); + + int getBlocksAllocated(); + + int getBlocksReleased(); + + /** + * Get counters of blocks actively allocated; may be inaccurate + * if the numbers change during the (non-synchronized) calculation. + * @return the number of actively allocated blocks. + */ + int getBlocksActivelyAllocated(); + + /** + * Record bytes written. + * @param count number of bytes + */ + void writeBytes(long count); + + /** + * Get the current count of bytes written. + * @return the counter value. + */ + long getBytesWritten(); + + /** + * A block has been allocated. + */ + void blockAllocated(); + + /** + * A block has been released. + */ + void blockReleased(); + + /** + * Get the value of a counter. + * @param name counter name + * @return the value or null if no matching counter was found. + */ + Long lookupCounterValue(String name); + + /** + * Get the value of a gauge. + * @param name gauge name + * @return the value or null if no matching gauge was found. + */ + Long lookupGaugeValue(String name); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/ChangeTrackerStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/ChangeTrackerStatistics.java new file mode 100644 index 0000000000000..572581e214fbb --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/ChangeTrackerStatistics.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +/** + * Interface for change tracking statistics. + */ +public interface ChangeTrackerStatistics { + + /** + * A version mismatch was detected. + */ + void versionMismatchError(); + + /** + * How many version mismatches have occurred. + * @return counter of mismatches. 
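Taken together, the BlockOutputStreamStatistics callbacks above describe a block lifecycle of queued, then started (reporting time spent in the queue), then completed or failed (reporting transfer time). The sketch below shows one way an uploader might drive them; the class, the Runnable-based transfer and the single-threaded flow are illustrative assumptions, and the same org.apache.hadoop.fs.s3a.statistics package is presumed.

import java.time.Duration;
import java.time.Instant;

/** Illustrative driver for the block upload statistics callbacks. */
final class BlockUploadLifecycleSketch {

  static void upload(BlockOutputStreamStatistics stats, int blockSize,
      Runnable transfer) {
    stats.blockUploadQueued(blockSize);
    Instant queued = Instant.now();
    // ...the executor eventually picks the block up...
    Instant started = Instant.now();
    stats.blockUploadStarted(Duration.between(queued, started), blockSize);
    try {
      transfer.run();
      stats.blockUploadCompleted(Duration.between(started, Instant.now()), blockSize);
    } catch (RuntimeException e) {
      // failure still reports the time spent in the transfer itself
      stats.blockUploadFailed(Duration.between(started, Instant.now()), blockSize);
      throw e;
    }
  }
}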
+ */ + long getVersionMismatches(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/CommitterStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/CommitterStatistics.java new file mode 100644 index 0000000000000..fd232a058d0b8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/CommitterStatistics.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +/** + * Statistics for S3A committers. + */ +public interface CommitterStatistics + extends S3AStatisticInterface { + + /** A commit has been created. */ + void commitCreated(); + + /** + * Data has been uploaded to be committed in a subsequent operation. + * @param size size in bytes + */ + void commitUploaded(long size); + + /** + * A commit has been completed. + * @param size size in bytes + */ + void commitCompleted(long size); + + /** A commit has been aborted. */ + void commitAborted(); + + /** + * A commit was reverted. + */ + void commitReverted(); + + /** + * A commit failed. + */ + void commitFailed(); + + /** + * Note that a task has completed. + * @param success success flag + */ + void taskCompleted(boolean success); + + /** + * Note that a job has completed. + * @param success success flag + */ + void jobCompleted(boolean success); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/CountersAndGauges.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/CountersAndGauges.java new file mode 100644 index 0000000000000..f9093ff7117cc --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/CountersAndGauges.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics; + +import java.time.Duration; + +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; + +/** + * This is the foundational API for collecting S3A statistics. + */ +public interface CountersAndGauges extends DurationTrackerFactory { + + /** + * Increment a specific counter. + * No-op if not defined. + * @param op operation + * @param count increment value + */ + void incrementCounter(Statistic op, long count); + + /** + * Increment a specific gauge. + * No-op if not defined. + * @param op operation + * @param count increment value + * @throws ClassCastException if the metric is of the wrong type + */ + void incrementGauge(Statistic op, long count); + + /** + * Decrement a specific gauge. + * No-op if not defined. + * @param op operation + * @param count increment value + * @throws ClassCastException if the metric is of the wrong type + */ + void decrementGauge(Statistic op, long count); + + /** + * Add a value to a quantiles statistic. No-op if the quantile + * isn't found. + * @param op operation to look up. + * @param value value to add. + * @throws ClassCastException if the metric is not a Quantiles. + */ + void addValueToQuantiles(Statistic op, long value); + + /** + * Record a duration. + * @param op operation + * @param success was the operation a success? + * @param duration how long did it take + */ + void recordDuration(Statistic op, boolean success, Duration duration); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/DelegationTokenStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/DelegationTokenStatistics.java new file mode 100644 index 0000000000000..3ab9224f3c568 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/DelegationTokenStatistics.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +/** + * Instrumentation exported to for S3A Delegation Token support. + */ +public interface DelegationTokenStatistics extends S3AStatisticInterface { + + /** A token has been issued. */ + void tokenIssued(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AInputStreamStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AInputStreamStatistics.java new file mode 100644 index 0000000000000..328d9f7c4ce4a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AInputStreamStatistics.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
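CountersAndGauges above pairs counter/gauge updates with duration recording. One plausible way to use recordDuration() is a timing wrapper like the sketch below; the wrapper itself is hypothetical and assumes the same org.apache.hadoop.fs.s3a.statistics package, while Statistic is the existing type already imported by the interface.

import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.Callable;

import org.apache.hadoop.fs.s3a.Statistic;

/** Illustrative timing wrapper feeding CountersAndGauges.recordDuration(). */
final class RecordDurationSketch {

  static <T> T timed(CountersAndGauges counters, Statistic op,
      Callable<T> operation) throws Exception {
    Instant start = Instant.now();
    boolean success = false;
    try {
      T result = operation.call();
      success = true;
      return result;
    } finally {
      // record the elapsed time whether the operation succeeded or failed
      counters.recordDuration(op, success, Duration.between(start, Instant.now()));
    }
  }
}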
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import org.apache.hadoop.fs.statistics.DurationTracker; + +/** + * Statistics updated by a + * {@link org.apache.hadoop.fs.s3a.S3AInputStream} during its use. + * It also contains getters for tests. + */ +public interface S3AInputStreamStatistics extends AutoCloseable, + S3AStatisticInterface { + + /** + * Seek backwards, incrementing the seek and backward seek counters. + * @param negativeOffset how far was the seek? + * This is expected to be negative. + */ + void seekBackwards(long negativeOffset); + + /** + * Record a forward seek, adding a seek operation, a forward + * seek operation, and any bytes skipped. + * @param skipped bytes moved forward in stream + * @param bytesReadInSeek number of bytes skipped by reading from the stream. + * If the seek was implemented by a close + reopen, set this to zero. + */ + void seekForwards(long skipped, long bytesReadInSeek); + + /** + * The inner stream was opened. + * The return value is used in the input stream to decide whether it is + * the initial vs later count. + * @return the previous count or zero if this is the first opening. + */ + long streamOpened(); + + /** + * The inner stream was closed. + * @param abortedConnection flag to indicate the stream was aborted, + * rather than closed cleanly + * @param remainingInCurrentRequest the number of bytes remaining in + * the current request. + */ + void streamClose(boolean abortedConnection, + long remainingInCurrentRequest); + + /** + * An ignored stream read exception was received. + */ + void readException(); + + /** + * Increment the bytes read counter by the number of bytes; + * no-op if the argument is negative. + * @param bytes number of bytes read + */ + void bytesRead(long bytes); + + /** + * A {@code read(byte[] buf, int off, int len)} operation has started. + * @param pos starting position of the read + * @param len length of bytes to read + */ + void readOperationStarted(long pos, long len); + + /** + * A {@code PositionedRead.read(position, buffer, offset, length)} + * operation has just started. + * @param pos starting position of the read + * @param len length of bytes to read + */ + void readFullyOperationStarted(long pos, long len); + + /** + * A read operation has completed. + * @param requested number of requested bytes + * @param actual the actual number of bytes + */ + void readOperationCompleted(int requested, int actual); + + @Override + void close(); + + /** + * The input policy has been switched. + * @param updatedPolicy enum value of new policy. + */ + void inputPolicySet(int updatedPolicy); + + /** + * Get a reference to the change tracker statistics for this + * stream. 
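The two-argument seekForwards() above separates how far the stream position moved from how many bytes were actually read and discarded to get there, which is why the SelectInputStream changes earlier in this patch pass the same value twice. A small illustrative helper (hypothetical class, same package assumed):

/** Illustrative reporting of a forward seek to S3AInputStreamStatistics. */
final class ForwardSeekReportSketch {

  static void reportForwardSeek(S3AInputStreamStatistics stats, long skipped,
      boolean implementedByReading) {
    // a seek satisfied by reading and discarding reports the bytes it read;
    // a close-and-reopen seek reports zero bytes read
    stats.seekForwards(skipped, implementedByReading ? skipped : 0);
  }
}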
+ * @return a reference to the change tracker statistics + */ + ChangeTrackerStatistics getChangeTrackerStatistics(); + + /** + * A stream {@code unbuffer()} call has been made. + */ + void unbuffered(); + + long getCloseOperations(); + + long getClosed(); + + long getAborted(); + + long getForwardSeekOperations(); + + long getBackwardSeekOperations(); + + /** + * The bytes read in read() operations. + * @return the number of bytes returned to the caller. + */ + long getBytesRead(); + + /** + * The total number of bytes read, including + * all read and discarded when closing streams + * or skipped during seek calls. + * @return the total number of bytes read from + * S3. + */ + long getTotalBytesRead(); + + long getBytesSkippedOnSeek(); + + long getBytesBackwardsOnSeek(); + + long getBytesReadInClose(); + + long getBytesDiscardedInAbort(); + + long getOpenOperations(); + + long getSeekOperations(); + + long getReadExceptions(); + + long getReadOperations(); + + long getReadFullyOperations(); + + long getReadsIncomplete(); + + long getPolicySetCount(); + + long getVersionMismatches(); + + long getInputPolicy(); + + /** + * Get the value of a counter. + * @param name counter name + * @return the value or null if no matching counter was found. + */ + Long lookupCounterValue(String name); + + /** + * Get the value of a gauge. + * @param name gauge name + * @return the value or null if no matching gauge was found. + */ + Long lookupGaugeValue(String name); + + /** + * Initiate a GET request. + * @return duration tracker; + */ + DurationTracker initiateGetRequest(); + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AMultipartUploaderStatistics.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AMultipartUploaderStatistics.java new file mode 100644 index 0000000000000..2aa7b341af679 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AMultipartUploaderStatistics.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import java.io.Closeable; + +/** + * Statistics for the S3A multipart uploader. + * It is expected to act as the statistics source for + * the uploader. 
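The calling pattern this uploader statistics interface anticipates looks roughly like the sketch below. The UploadRunner class and its PartWriter callback are hypothetical stand-ins for the real uploader; only the statistics callbacks, declared just after this comment, come from the patch.

  import java.io.IOException;

  import org.apache.hadoop.fs.s3a.statistics.S3AMultipartUploaderStatistics;

  /** Hypothetical consumer of the multipart uploader statistics callbacks. */
  final class UploadRunner {

    /** Stand-in for whatever actually uploads one part. */
    interface PartWriter {
      void putPart(long length) throws IOException;
    }

    private final S3AMultipartUploaderStatistics stats;

    UploadRunner(S3AMultipartUploaderStatistics stats) {
      this.stats = stats;
      stats.instantiated();
    }

    void uploadParts(PartWriter writer, long[] partLengths) throws IOException {
      stats.uploadStarted();
      try {
        for (long length : partLengths) {
          writer.putPart(length);
          stats.partPut(length);
        }
        stats.uploadCompleted();
      } catch (IOException e) {
        stats.uploadAborted();
        throw e;
      }
    }
  }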
+ */ +public interface S3AMultipartUploaderStatistics extends Closeable, + S3AStatisticInterface { + + void instantiated(); + + void uploadStarted(); + + void partPut(long lengthInBytes); + + void uploadCompleted(); + + void uploadAborted(); + + void abortUploadsUnderPathInvoked(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AStatisticInterface.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AStatisticInterface.java new file mode 100644 index 0000000000000..836f2038d129b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AStatisticInterface.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +/** + * Base Interface for S3A Statistics, as a way of managing + * them. + */ +public interface S3AStatisticInterface extends IOStatisticsSource, + DurationTrackerFactory { + + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AStatisticsContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AStatisticsContext.java new file mode 100644 index 0000000000000..27f1398d4ea51 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/S3AStatisticsContext.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import org.apache.hadoop.fs.s3a.s3guard.MetastoreInstrumentation; + +/** + * This is the statistics context for ongoing operations in S3A. + */ +public interface S3AStatisticsContext extends CountersAndGauges { + + /** + * Get the metastore instrumentation. + * @return an instance of the metastore statistics tracking. 
+ */ + MetastoreInstrumentation getS3GuardInstrumentation(); + + /** + * Create a stream input statistics instance. + * @return the new instance + */ + S3AInputStreamStatistics newInputStreamStatistics(); + + /** + * Create a new instance of the committer statistics. + * @return a new committer statistics instance + */ + CommitterStatistics newCommitterStatistics(); + + /** + * Create a stream output statistics instance. + * @return the new instance + */ + BlockOutputStreamStatistics newOutputStreamStatistics(); + + /** + * Create a delegation token statistics instance. + * @return an instance of delegation token statistics + */ + DelegationTokenStatistics newDelegationTokenStatistics(); + + /** + * Create a StatisticsFromAwsSdk instance. + * @return an instance of StatisticsFromAwsSdk + */ + StatisticsFromAwsSdk newStatisticsFromAwsSdk(); + + /** + * Creaet a multipart statistics collector. + * @return an instance + */ + S3AMultipartUploaderStatistics createMultipartUploaderStatistics(); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/StatisticTypeEnum.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/StatisticTypeEnum.java new file mode 100644 index 0000000000000..ac7bd9b5e7b49 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/StatisticTypeEnum.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +/** + * Enum of statistic types. + */ +public enum StatisticTypeEnum { + + /** + * Counter. + */ + TYPE_COUNTER, + + /** + * Duration. + */ + TYPE_DURATION, + + /** + * Gauge. + */ + TYPE_GAUGE, + + /** + * Quantile. + */ + TYPE_QUANTILE, + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/StatisticsFromAwsSdk.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/StatisticsFromAwsSdk.java new file mode 100644 index 0000000000000..2fd7c685ccdd8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/StatisticsFromAwsSdk.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import java.time.Duration; + +import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector; + +/** + * interface to receive statistics events from the AWS SDK + * by way of {@link AwsStatisticsCollector}. + */ +public interface StatisticsFromAwsSdk { + + /** + * Record a number of AWS requests. + * @param count number of events. + */ + void updateAwsRequestCount(long count); + + /** + * Record a number of AWS request retries. + * @param count number of events. + */ + void updateAwsRetryCount(long count); + + /** + * Record a number of throttle exceptions received. + * @param count number of events. + */ + void updateAwsThrottleExceptionsCount(long count); + + /** + * Record how long a request took overall. + * @param duration duration of operation. + */ + void noteAwsRequestTime(Duration duration); + + /** + * Record how long a request took to execute on the + * client. + * @param duration duration of operation. + */ + void noteAwsClientExecuteTime(Duration duration); + + /** + * Record how long a request took to marshall into + * XML. + * @param duration duration of operation. + */ + void noteRequestMarshallTime(Duration duration); + + /** + * Record how long a request took to sign, including + * any calls to EC2 credential endpoints. + * @param duration duration of operation. + */ + void noteRequestSigningTime(Duration duration); + + /** + * Record how long it took to process the response. + * @param duration duration of operation. + */ + void noteResponseProcessingTime(Duration duration); +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/AbstractS3AStatisticsSource.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/AbstractS3AStatisticsSource.java new file mode 100644 index 0000000000000..6b7eb245e49ff --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/AbstractS3AStatisticsSource.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +/** + * Base class for implementing IOStatistics sources in the S3 module. + *

    + * A lot of the methods are very terse, because S3AInstrumentation has + * verbose methods of similar names; the short ones always + * refer to the inner class and not any superclass method. + *
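A minimal subclass of the base class described here (its body follows) would build a statistics store in its constructor and update it through the inherited helpers. The DemoStatisticsSource class and the demo_operations counter name below are made up for illustration; iostatisticsStore(), setIOStatistics() and incCounter() are the ones used elsewhere in this patch.

  import org.apache.hadoop.fs.s3a.statistics.impl.AbstractS3AStatisticsSource;

  import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;

  /** Hypothetical subclass: one counter, updated through the inherited helpers. */
  final class DemoStatisticsSource extends AbstractS3AStatisticsSource {

    /** Made-up counter name, for illustration only. */
    private static final String DEMO_OP = "demo_operations";

    DemoStatisticsSource() {
      // the store must be built and registered in the subclass constructor
      setIOStatistics(iostatisticsStore()
          .withCounters(DEMO_OP)
          .build());
    }

    long demoOperation() {
      return incCounter(DEMO_OP);
    }
  }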

    + */ +public abstract class AbstractS3AStatisticsSource implements + IOStatisticsSource, DurationTrackerFactory { + + private IOStatisticsStore ioStatistics; + + protected AbstractS3AStatisticsSource() { + } + + @Override + public IOStatisticsStore getIOStatistics() { + return ioStatistics; + } + + /** + * Setter. + * This must be called in the subclass constructor with + * whatever statistics store that subclass uses. + * @param statistics statistics to set + */ + protected void setIOStatistics(final IOStatisticsStore statistics) { + this.ioStatistics = statistics; + } + + /** + * Increment a named counter by 1. + * @param name counter name + * @return the updated value or, if the counter is unknown: 0 + */ + public long incCounter(String name) { + return incCounter(name, 1); + } + + /** + * Increment a named counter by a given value. + * @param name counter name + * @param value value to increment by + * @return the updated value or, if the counter is unknown: 0 + */ + public long incCounter(String name, long value) { + return ioStatistics.incrementCounter(name, value); + } + + /** + * {@inheritDoc} + */ + public Long lookupCounterValue(final String name) { + return ioStatistics.counters().get(name); + } + + /** + * {@inheritDoc} + */ + public Long lookupGaugeValue(final String name) { + return ioStatistics.gauges().get(name); + } + + public long incGauge(String name, long v) { + return ioStatistics.incrementGauge(name, v); + } + + public long incGauge(String name) { + return incGauge(name, 1); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "AbstractS3AStatisticsSource{"); + sb.append(ioStatistics); + sb.append('}'); + return sb.toString(); + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return getIOStatistics().trackDuration(key, count); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/AwsStatisticsCollector.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/AwsStatisticsCollector.java new file mode 100644 index 0000000000000..c002a4a6dee1d --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/AwsStatisticsCollector.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import java.time.Duration; +import java.util.function.Consumer; +import java.util.function.LongConsumer; + +import com.amazonaws.Request; +import com.amazonaws.Response; +import com.amazonaws.metrics.RequestMetricCollector; +import com.amazonaws.util.TimingInfo; + +import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; + +import static com.amazonaws.util.AWSRequestMetrics.Field.ClientExecuteTime; +import static com.amazonaws.util.AWSRequestMetrics.Field.HttpClientRetryCount; +import static com.amazonaws.util.AWSRequestMetrics.Field.HttpRequestTime; +import static com.amazonaws.util.AWSRequestMetrics.Field.RequestCount; +import static com.amazonaws.util.AWSRequestMetrics.Field.RequestMarshallTime; +import static com.amazonaws.util.AWSRequestMetrics.Field.RequestSigningTime; +import static com.amazonaws.util.AWSRequestMetrics.Field.ResponseProcessingTime; +import static com.amazonaws.util.AWSRequestMetrics.Field.ThrottleException; + +/** + * Collect statistics from the AWS SDK and forward to an instance of + * {@link StatisticsFromAwsSdk} and thence into the S3A statistics. + *
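One plausible way to attach the collector defined below to an S3 client, assuming the v1 SDK's AwsClientBuilder.withMetricsCollector() hook; the MetricsWiring class is hypothetical, and credential/region configuration is omitted for brevity.

  import com.amazonaws.services.s3.AmazonS3;
  import com.amazonaws.services.s3.AmazonS3ClientBuilder;

  import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk;
  import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector;

  /** Hypothetical wiring of the collector into a v1 SDK client. */
  final class MetricsWiring {

    private MetricsWiring() {
    }

    static AmazonS3 clientWithMetrics(StatisticsFromAwsSdk sink) {
      // every completed request/response pair is handed to collectMetrics()
      return AmazonS3ClientBuilder.standard()
          .withMetricsCollector(new AwsStatisticsCollector(sink))
          .build();
    }
  }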

    + * See {@code com.facebook.presto.hive.s3.PrestoS3FileSystemMetricCollector} + * for the inspiration for this. + *

    + * See {@code com.amazonaws.util.AWSRequestMetrics} for metric names. + */ +public class AwsStatisticsCollector extends RequestMetricCollector { + + /** + * final destination of updates. + */ + private final StatisticsFromAwsSdk collector; + + /** + * Instantiate. + * @param collector final destination of updates + */ + public AwsStatisticsCollector(final StatisticsFromAwsSdk collector) { + this.collector = collector; + } + + /** + * This is the callback from the AWS SDK where metrics + * can be collected. + * @param request AWS request + * @param response AWS response + */ + @Override + public void collectMetrics( + final Request request, + final Response response) { + + TimingInfo timingInfo = request.getAWSRequestMetrics().getTimingInfo(); + + counter(timingInfo, HttpClientRetryCount.name(), + collector::updateAwsRetryCount); + counter(timingInfo, RequestCount.name(), + collector::updateAwsRequestCount); + counter(timingInfo, ThrottleException.name(), + collector::updateAwsThrottleExceptionsCount); + + timing(timingInfo, ClientExecuteTime.name(), + collector::noteAwsClientExecuteTime); + timing(timingInfo, HttpRequestTime.name(), + collector::noteAwsRequestTime); + timing(timingInfo, RequestMarshallTime.name(), + collector::noteRequestMarshallTime); + timing(timingInfo, RequestSigningTime.name(), + collector::noteRequestSigningTime); + timing(timingInfo, ResponseProcessingTime.name(), + collector::noteResponseProcessingTime); + } + + /** + * Process a timing. + * @param timingInfo timing info + * @param subMeasurementName sub measurement + * @param durationConsumer consumer + */ + private void timing( + TimingInfo timingInfo, + String subMeasurementName, + Consumer durationConsumer) { + TimingInfo t1 = timingInfo.getSubMeasurement(subMeasurementName); + if (t1 != null && t1.getTimeTakenMillisIfKnown() != null) { + durationConsumer.accept(Duration.ofMillis( + t1.getTimeTakenMillisIfKnown().longValue())); + } + } + + /** + * Process a counter. + * @param timingInfo timing info + * @param subMeasurementName sub measurement + * @param consumer consumer + */ + private void counter( + TimingInfo timingInfo, + String subMeasurementName, + LongConsumer consumer) { + Number n = timingInfo.getCounter(subMeasurementName); + if (n != null) { + consumer.accept(n.longValue()); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/BondedS3AStatisticsContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/BondedS3AStatisticsContext.java new file mode 100644 index 0000000000000..51bb4afebc4ff --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/BondedS3AStatisticsContext.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import javax.annotation.Nullable; + +import java.time.Duration; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.s3guard.MetastoreInstrumentation; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.s3a.statistics.DelegationTokenStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AMultipartUploaderStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; +import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; +import org.apache.hadoop.fs.statistics.DurationTracker; + +/** + * An S3A statistics context which is bonded to a + * S3AInstrumentation instance -inevitably that of an S3AFileSystem + * instance. + *

    + * An interface is used to bind to the relevant fields, rather + * than have them passed in the constructor because some + * production code, specifically, DelegateToFileSystem, + * patches the protected field after initialization. + *
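A sketch of that binding, using the S3AFSStatisticsSource interface declared at the end of this class; the StatisticsBinding wrapper is hypothetical, everything else comes from this patch. Because the source is consulted on every call, a FileSystem.Statistics reference patched in later is still picked up.

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.s3a.S3AInstrumentation;
  import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext;
  import org.apache.hadoop.fs.s3a.statistics.impl.BondedS3AStatisticsContext;

  /** Hypothetical helper creating a bonded context from its two dependencies. */
  final class StatisticsBinding {

    private StatisticsBinding() {
    }

    static S3AStatisticsContext bind(S3AInstrumentation instrumentation,
        FileSystem.Statistics fsStatistics) {
      return new BondedS3AStatisticsContext(
          new BondedS3AStatisticsContext.S3AFSStatisticsSource() {
            @Override
            public S3AInstrumentation getInstrumentation() {
              return instrumentation;
            }

            @Override
            public FileSystem.Statistics getInstanceStatistics() {
              // may legitimately be null; the interface marks it @Nullable
              return fsStatistics;
            }
          });
    }
  }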

    + *

    + * All operations are passed through directly to that class. + *

    + *

    + * If an instance of FileSystem.Statistics is passed in, it + * will be used whenever input stream statistics are created; + * however, those statistics are always incremented in the + * current thread. + *

    + *

    + * As a result, cross-thread IO will under-report. + *

    + * + * This is addressed through the stream statistics classes + * only updating the stats in the close() call. Provided + * they are closed in the worker thread, all stats collected in + * helper threads will be included. + */ +public class BondedS3AStatisticsContext implements S3AStatisticsContext { + + /** Source of statistics services. */ + private final S3AFSStatisticsSource statisticsSource; + + /** + * Instantiate. + * @param statisticsSource integration binding + */ + public BondedS3AStatisticsContext( + final S3AFSStatisticsSource statisticsSource) { + this.statisticsSource = statisticsSource; + } + + + /** + * Get the instrumentation from the FS integration. + * @return instrumentation instance. + */ + private S3AInstrumentation getInstrumentation() { + return statisticsSource.getInstrumentation(); + } + + /** + * The filesystem statistics: know this is thread-local. + * @return FS statistics. + */ + private FileSystem.Statistics getInstanceStatistics() { + return statisticsSource.getInstanceStatistics(); + } + + /** + * Get a MetastoreInstrumentation getInstrumentation() instance for this + * context. + * @return the S3Guard getInstrumentation() point. + */ + @Override + public MetastoreInstrumentation getS3GuardInstrumentation() { + return getInstrumentation().getS3GuardInstrumentation(); + } + + /** + * Create a stream input statistics instance. + * The FileSystem.Statistics instance of the {@link #statisticsSource} + * is used as the reference to FileSystem statistics to update + * @return the new instance + */ + @Override + public S3AInputStreamStatistics newInputStreamStatistics() { + return getInstrumentation().newInputStreamStatistics( + statisticsSource.getInstanceStatistics()); + } + + /** + * Create a new instance of the committer statistics. + * @return a new committer statistics instance + */ + @Override + public CommitterStatistics newCommitterStatistics() { + return getInstrumentation().newCommitterStatistics(); + } + + /** + * Create a stream output statistics instance. + * @return the new instance + */ + @Override + public BlockOutputStreamStatistics newOutputStreamStatistics() { + return getInstrumentation() + .newOutputStreamStatistics(getInstanceStatistics()); + } + + /** + * Increment a specific counter. + *

    + * No-op if not defined. + * @param op operation + * @param count increment value + */ + @Override + public void incrementCounter(Statistic op, long count) { + getInstrumentation().incrementCounter(op, count); + } + + /** + * Increment a specific gauge. + *

    + * No-op if not defined. + * @param op operation + * @param count increment value + * @throws ClassCastException if the metric is of the wrong type + */ + @Override + public void incrementGauge(Statistic op, long count) { + getInstrumentation().incrementGauge(op, count); + } + + /** + * Decrement a specific gauge. + *

    + * No-op if not defined. + * @param op operation + * @param count increment value + * @throws ClassCastException if the metric is of the wrong type + */ + @Override + public void decrementGauge(Statistic op, long count) { + getInstrumentation().decrementGauge(op, count); + } + + /** + * Add a value to a quantiles statistic. No-op if the quantile + * isn't found. + * @param op operation to look up. + * @param value value to add. + * @throws ClassCastException if the metric is not a Quantiles. + */ + @Override + public void addValueToQuantiles(Statistic op, long value) { + getInstrumentation().addValueToQuantiles(op, value); + } + + @Override + public void recordDuration(final Statistic op, + final boolean success, + final Duration duration) { + getInstrumentation().recordDuration(op, success, duration); + } + + /** + * Create a delegation token statistics instance. + * @return an instance of delegation token statistics + */ + @Override + public DelegationTokenStatistics newDelegationTokenStatistics() { + return getInstrumentation().newDelegationTokenStatistics(); + } + + @Override + public StatisticsFromAwsSdk newStatisticsFromAwsSdk() { + return new StatisticsFromAwsSdkImpl(getInstrumentation()); + } + + @Override + public S3AMultipartUploaderStatistics createMultipartUploaderStatistics() { + return new S3AMultipartUploaderStatisticsImpl(this::incrementCounter); + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return getInstrumentation().trackDuration(key, count); + } + + /** + * This is the interface which an integration source must implement + * for the integration. + * Note that the FileSystem.statistics field may be null for a class; + */ + public interface S3AFSStatisticsSource { + + /** + * Get the S3A Instrumentation. + * @return a non-null instrumentation instance + */ + S3AInstrumentation getInstrumentation(); + + /** + * Get the statistics of the FS instance, shared across all threads. + * @return filesystem statistics + */ + @Nullable + FileSystem.Statistics getInstanceStatistics(); + + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/CountingChangeTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/CountingChangeTracker.java new file mode 100644 index 0000000000000..18157740c8ba0 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/CountingChangeTracker.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.s3a.statistics.ChangeTrackerStatistics; + +/** + * A change tracker which increments an atomic long. + */ +public class CountingChangeTracker implements + ChangeTrackerStatistics { + + /** + * The counter which is updated on every mismatch. + */ + private final AtomicLong counter; + + public CountingChangeTracker(final AtomicLong counter) { + this.counter = counter; + } + + public CountingChangeTracker() { + this(new AtomicLong()); + } + + @Override + public void versionMismatchError() { + counter.incrementAndGet(); + } + + @Override + public long getVersionMismatches() { + return counter.get(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java new file mode 100644 index 0000000000000..c8cd80592088b --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/EmptyS3AStatisticsContext.java @@ -0,0 +1,597 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import java.io.IOException; +import java.time.Duration; + +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.s3guard.MetastoreInstrumentation; +import org.apache.hadoop.fs.s3a.s3guard.MetastoreInstrumentationImpl; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.ChangeTrackerStatistics; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.s3a.statistics.DelegationTokenStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AMultipartUploaderStatistics; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticInterface; +import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; +import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.DurationTracker; + +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.emptyStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTracker; + +/** + * Special statistics context, all of whose context operations are no-ops. + * All statistics instances it returns are also empty. + *
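For example, a component can insist on a non-null statistics context yet still be unit-testable without a filesystem. The StreamFactory class below is a hypothetical consumer; the empty context is the one defined here.

  import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics;
  import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext;
  import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext;

  /** Hypothetical component which requires a context but tolerates a no-op one. */
  final class StreamFactory {

    private final S3AStatisticsContext statisticsContext;

    StreamFactory(S3AStatisticsContext statisticsContext) {
      this.statisticsContext = statisticsContext;
    }

    S3AInputStreamStatistics statisticsForNewStream() {
      return statisticsContext.newInputStreamStatistics();
    }

    /** In a test, no filesystem or metrics system needs to exist. */
    static StreamFactory forTesting() {
      return new StreamFactory(new EmptyS3AStatisticsContext());
    }
  }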

    + * This class is here primarily to aid in testing, but it also allows for + * classes to require a non-empty statistics context in their constructor -yet + * still be instantiated without one bound to any filesystem. + */ +public final class EmptyS3AStatisticsContext implements S3AStatisticsContext { + + public static final MetastoreInstrumentation + METASTORE_INSTRUMENTATION = new MetastoreInstrumentationImpl(); + + public static final S3AInputStreamStatistics + EMPTY_INPUT_STREAM_STATISTICS = new EmptyInputStreamStatistics(); + + public static final CommitterStatistics + EMPTY_COMMITTER_STATISTICS = new EmptyCommitterStatistics(); + + @SuppressWarnings("IOResourceOpenedButNotSafelyClosed") + public static final BlockOutputStreamStatistics + EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS + = new EmptyBlockOutputStreamStatistics(); + + public static final DelegationTokenStatistics + EMPTY_DELEGATION_TOKEN_STATISTICS = new EmptyDelegationTokenStatistics(); + + public static final StatisticsFromAwsSdk + EMPTY_STATISTICS_FROM_AWS_SDK = new EmptyStatisticsFromAwsSdk(); + + @Override + public MetastoreInstrumentation getS3GuardInstrumentation() { + return METASTORE_INSTRUMENTATION; + } + + @Override + public S3AInputStreamStatistics newInputStreamStatistics() { + return EMPTY_INPUT_STREAM_STATISTICS; + } + + @Override + public CommitterStatistics newCommitterStatistics() { + return EMPTY_COMMITTER_STATISTICS; + } + + @Override + public BlockOutputStreamStatistics newOutputStreamStatistics() { + return EMPTY_BLOCK_OUTPUT_STREAM_STATISTICS; + } + + @Override + public DelegationTokenStatistics newDelegationTokenStatistics() { + return EMPTY_DELEGATION_TOKEN_STATISTICS; + } + + @Override + public StatisticsFromAwsSdk newStatisticsFromAwsSdk() { + return EMPTY_STATISTICS_FROM_AWS_SDK; + } + + @Override + public S3AMultipartUploaderStatistics createMultipartUploaderStatistics() { + return new EmptyMultipartUploaderStatistics(); + } + + @Override + public void incrementCounter(final Statistic op, final long count) { + + } + + @Override + public void incrementGauge(final Statistic op, final long count) { + + } + + @Override + public void decrementGauge(final Statistic op, final long count) { + + } + + @Override + public void addValueToQuantiles(final Statistic op, final long value) { + + } + + @Override + public void recordDuration(final Statistic op, + final boolean success, + final Duration duration) { + + } + + /** + * Base class for all the empty implementations. + */ + private static class EmptyS3AStatisticImpl implements + S3AStatisticInterface { + + /** + * Always return the stub duration tracker. + * @param key statistic key prefix + * @param count #of times to increment the matching counter in this + * operation. + * @return stub tracker. + */ + public DurationTracker trackDuration(String key, long count) { + return stubDurationTracker(); + } + } + + /** + * Input Stream statistics callbacks. 
+ */ + private static final class EmptyInputStreamStatistics + extends EmptyS3AStatisticImpl + implements S3AInputStreamStatistics { + + @Override + public void seekBackwards(final long negativeOffset) { + + } + + @Override + public void seekForwards(final long skipped, + final long bytesReadInSeek) { + + } + + @Override + public long streamOpened() { + return 0; + } + + @Override + public void streamClose(final boolean abortedConnection, + final long remainingInCurrentRequest) { + + } + + @Override + public void readException() { + + } + + @Override + public void bytesRead(final long bytes) { + + } + + @Override + public void readOperationStarted(final long pos, final long len) { + + } + + @Override + public void readFullyOperationStarted(final long pos, final long len) { + + } + + @Override + public void readOperationCompleted(final int requested, final int actual) { + + } + + @Override + public void close() { + + } + + @Override + public void inputPolicySet(final int updatedPolicy) { + + } + + @Override + public void unbuffered() { + + } + + /** + * Return an IO statistics instance. + * @return an empty IO statistics instance. + */ + @Override + public IOStatistics getIOStatistics() { + return emptyStatistics(); + } + + @Override + public long getCloseOperations() { + return 0; + } + + @Override + public long getClosed() { + return 0; + } + + @Override + public long getAborted() { + return 0; + } + + @Override + public long getForwardSeekOperations() { + return 0; + } + + @Override + public long getBackwardSeekOperations() { + return 0; + } + + @Override + public long getBytesRead() { + return 0; + } + + @Override + public long getTotalBytesRead() { + return 0; + } + + @Override + public long getBytesSkippedOnSeek() { + return 0; + } + + @Override + public long getBytesBackwardsOnSeek() { + return 0; + } + + @Override + public long getBytesReadInClose() { + return 0; + } + + @Override + public long getBytesDiscardedInAbort() { + return 0; + } + + @Override + public long getOpenOperations() { + return 0; + } + + @Override + public long getSeekOperations() { + return 0; + } + + @Override + public long getReadExceptions() { + return 0; + } + + @Override + public long getReadOperations() { + return 0; + } + + @Override + public long getReadFullyOperations() { + return 0; + } + + @Override + public long getReadsIncomplete() { + return 0; + } + + @Override + public long getPolicySetCount() { + return 0; + } + + @Override + public long getVersionMismatches() { + return 0; + } + + @Override + public long getInputPolicy() { + return 0; + } + + @Override + public Long lookupCounterValue(final String name) { + return 0L; + } + + @Override + public Long lookupGaugeValue(final String name) { + return 0L; + } + + @Override + public ChangeTrackerStatistics getChangeTrackerStatistics() { + return new CountingChangeTracker(); + } + + @Override + public DurationTracker initiateGetRequest() { + return stubDurationTracker(); + } + + } + + /** + * Committer statistics. 
+ */ + private static final class EmptyCommitterStatistics + extends EmptyS3AStatisticImpl + implements CommitterStatistics { + + @Override + public void commitCreated() { + } + + @Override + public void commitUploaded(final long size) { + } + + @Override + public void commitCompleted(final long size) { + } + + @Override + public void commitAborted() { + } + + @Override + public void commitReverted() { + } + + @Override + public void commitFailed() { + } + + @Override + public void taskCompleted(final boolean success) { + } + + @Override + public void jobCompleted(final boolean success) { + } + } + + private static final class EmptyBlockOutputStreamStatistics + extends EmptyS3AStatisticImpl + implements BlockOutputStreamStatistics { + + @Override + public void blockUploadQueued(final int blockSize) { + } + + @Override + public void blockUploadStarted(final Duration timeInQueue, + final int blockSize) { + } + + @Override + public void blockUploadCompleted(final Duration timeSinceUploadStarted, + final int blockSize) { + } + + @Override + public void blockUploadFailed(final Duration timeSinceUploadStarted, + final int blockSize) { + } + + @Override + public void bytesTransferred(final long byteCount) { + } + + @Override + public void exceptionInMultipartComplete(final int count) { + + } + + @Override + public void exceptionInMultipartAbort() { + } + + @Override + public long getBytesPendingUpload() { + return 0; + } + + @Override + public void commitUploaded(final long size) { + + } + + @Override + public int getBlocksAllocated() { + return 0; + } + + @Override + public int getBlocksReleased() { + return 0; + } + + @Override + public int getBlocksActivelyAllocated() { + return 0; + } + + @Override + public IOStatistics getIOStatistics() { + return emptyStatistics(); + } + + @Override + public void blockAllocated() { + } + + @Override + public void blockReleased() { + } + + @Override + public void writeBytes(final long count) { + } + + @Override + public long getBytesWritten() { + return 0; + } + + @Override + public Long lookupCounterValue(final String name) { + return 0L; + } + + @Override + public Long lookupGaugeValue(final String name) { + return 0L; + } + + @Override + public void close() throws IOException { + } + + } + + /** + * Delegation Token Statistics. + */ + private static final class EmptyDelegationTokenStatistics + extends EmptyS3AStatisticImpl + implements DelegationTokenStatistics { + + @Override + public void tokenIssued() { + + } + } + + /** + * AWS SDK Callbacks. + */ + private static final class EmptyStatisticsFromAwsSdk + implements StatisticsFromAwsSdk { + + @Override + public void updateAwsRequestCount(final long longValue) { + + } + + @Override + public void updateAwsRetryCount(final long longValue) { + + } + + @Override + public void updateAwsThrottleExceptionsCount(final long longValue) { + + } + + @Override + public void noteAwsRequestTime(final Duration ofMillis) { + + } + + @Override + public void noteAwsClientExecuteTime(final Duration ofMillis) { + + } + + @Override + public void noteRequestMarshallTime(final Duration duration) { + + } + + @Override + public void noteRequestSigningTime(final Duration duration) { + + } + + @Override + public void noteResponseProcessingTime(final Duration duration) { + + } + } + + /** + * Multipart Uploader. 
+ */ + public static final class EmptyMultipartUploaderStatistics + implements S3AMultipartUploaderStatistics { + + @Override + public void instantiated() { + + } + + @Override + public void uploadStarted() { + + } + + @Override + public void partPut(final long lengthInBytes) { + + } + + @Override + public void uploadCompleted() { + + } + + @Override + public void uploadAborted() { + + } + + @Override + public void abortUploadsUnderPathInvoked() { + + } + + @Override + public void close() throws IOException { + + } + + @Override + public DurationTracker trackDuration(final String key, final long count) { + return stubDurationTracker(); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/S3AMultipartUploaderStatisticsImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/S3AMultipartUploaderStatisticsImpl.java new file mode 100644 index 0000000000000..7b6d559cf2099 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/S3AMultipartUploaderStatisticsImpl.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + + +import java.io.IOException; +import java.util.Objects; +import java.util.function.BiConsumer; + +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.S3AMultipartUploaderStatistics; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_ABORTED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_COMPLETED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_INSTANTIATED; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_PART_PUT; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_PART_PUT_BYTES; +import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_STARTED; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * Implementation of the uploader statistics. + *

    + * This takes a function which updates an external counter; it is + * invoked as values change, so the implementation can be bonded to + * arbitrary statistic collectors. + *

    + *

    + * Internally it builds a map of the relevant multipart statistics, + * increments as appropriate and serves this data back through + * the {@code IOStatisticsSource} API. + *
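A standalone sketch of that behaviour: even with a no-op callback, the counters accumulate in the internal store and can be read back by name. The UploaderStatisticsDemo class is hypothetical; the constructor, callbacks and statistic symbol are the ones defined in this file.

  import java.io.IOException;

  import org.apache.hadoop.fs.s3a.statistics.impl.S3AMultipartUploaderStatisticsImpl;

  import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_PART_PUT;

  /** Hypothetical standalone use; the no-op callback keeps updates local. */
  final class UploaderStatisticsDemo {

    private UploaderStatisticsDemo() {
    }

    static long countPartPuts() throws IOException {
      S3AMultipartUploaderStatisticsImpl stats =
          new S3AMultipartUploaderStatisticsImpl((statistic, count) -> { });
      stats.instantiated();
      stats.uploadStarted();
      stats.partPut(8L * 1024 * 1024);  // one 8 MB part
      stats.uploadCompleted();
      long parts = stats.lookupCounterValue(MULTIPART_UPLOAD_PART_PUT.getSymbol());
      stats.close();
      return parts;  // 1
    }
  }

In production use the callback is the owning context's incrementCounter method, as createMultipartUploaderStatistics() in BondedS3AStatisticsContext shows.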

    + */ +public final class S3AMultipartUploaderStatisticsImpl + extends AbstractS3AStatisticsSource + implements S3AMultipartUploaderStatistics { + + /** + * The operation to increment a counter/statistic by a value. + */ + private final BiConsumer incrementCallback; + + /** + * Constructor. + * @param incrementCallback The operation to increment a + * counter/statistic by a value. + */ + public S3AMultipartUploaderStatisticsImpl( + final BiConsumer incrementCallback) { + this.incrementCallback = Objects.requireNonNull(incrementCallback); + IOStatisticsStore st = iostatisticsStore() + .withCounters( + MULTIPART_UPLOAD_INSTANTIATED.getSymbol(), + MULTIPART_UPLOAD_PART_PUT.getSymbol(), + MULTIPART_UPLOAD_PART_PUT_BYTES.getSymbol(), + MULTIPART_UPLOAD_ABORTED.getSymbol(), + MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED.getSymbol(), + MULTIPART_UPLOAD_COMPLETED.getSymbol(), + MULTIPART_UPLOAD_STARTED.getSymbol()) + .build(); + setIOStatistics(st); + } + + private void inc(Statistic op, long count) { + incrementCallback.accept(op, count); + incCounter(op.getSymbol(), count); + } + + @Override + public void instantiated() { + inc(MULTIPART_UPLOAD_INSTANTIATED, 1); + } + + @Override + public void uploadStarted() { + inc(MULTIPART_UPLOAD_STARTED, 1); + } + + @Override + public void partPut(final long lengthInBytes) { + inc(MULTIPART_UPLOAD_PART_PUT, 1); + inc(MULTIPART_UPLOAD_PART_PUT_BYTES, lengthInBytes); + } + + @Override + public void uploadCompleted() { + inc(MULTIPART_UPLOAD_COMPLETED, 1); + } + + @Override + public void uploadAborted() { + inc(MULTIPART_UPLOAD_ABORTED, 1); + } + + @Override + public void abortUploadsUnderPathInvoked() { + inc(MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED, 1); + } + + @Override + public void close() throws IOException { + + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/StatisticsFromAwsSdkImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/StatisticsFromAwsSdkImpl.java new file mode 100644 index 0000000000000..48b0b2bf454d3 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/StatisticsFromAwsSdkImpl.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics.impl; + +import java.time.Duration; + +import org.apache.hadoop.fs.s3a.statistics.CountersAndGauges; +import org.apache.hadoop.fs.s3a.statistics.StatisticsFromAwsSdk; + +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_REQUEST; +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_RETRY; +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_THROTTLED; +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_THROTTLE_RATE; + +/** + * Hook up AWS SDK Statistics to the S3 counters. + *
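Putting the two collector classes together, a sketch of the chain from SDK events to the S3A counters; the SdkCounterBinding class is hypothetical, the two constructors are the ones defined in this package.

  import com.amazonaws.metrics.RequestMetricCollector;

  import org.apache.hadoop.fs.s3a.statistics.CountersAndGauges;
  import org.apache.hadoop.fs.s3a.statistics.impl.AwsStatisticsCollector;
  import org.apache.hadoop.fs.s3a.statistics.impl.StatisticsFromAwsSdkImpl;

  /** Hypothetical glue: SDK request metrics end up in the S3A counters. */
  final class SdkCounterBinding {

    private SdkCounterBinding() {
    }

    static RequestMetricCollector bind(CountersAndGauges counters) {
      // request, retry and throttle events increment STORE_IO_REQUEST,
      // STORE_IO_RETRY and STORE_IO_THROTTLED respectively
      return new AwsStatisticsCollector(new StatisticsFromAwsSdkImpl(counters));
    }
  }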

    + * Durations are not currently being used; that could be + * changed in future once an effective strategy for reporting + * them is determined. + */ +public final class StatisticsFromAwsSdkImpl implements + StatisticsFromAwsSdk { + + private final CountersAndGauges countersAndGauges; + + public StatisticsFromAwsSdkImpl( + final CountersAndGauges countersAndGauges) { + this.countersAndGauges = countersAndGauges; + } + + @Override + public void updateAwsRequestCount(final long count) { + countersAndGauges.incrementCounter(STORE_IO_REQUEST, count); + } + + @Override + public void updateAwsRetryCount(final long count) { + countersAndGauges.incrementCounter(STORE_IO_RETRY, count); + } + + @Override + public void updateAwsThrottleExceptionsCount(final long count) { + countersAndGauges.incrementCounter(STORE_IO_THROTTLED, count); + countersAndGauges.addValueToQuantiles(STORE_IO_THROTTLE_RATE, count); + } + + @Override + public void noteAwsRequestTime(final Duration duration) { + + } + + @Override + public void noteAwsClientExecuteTime(final Duration duration) { + + } + + @Override + public void noteRequestMarshallTime(final Duration duration) { + + } + + @Override + public void noteRequestSigningTime(final Duration duration) { + + } + + @Override + public void noteResponseProcessingTime(final Duration duration) { + + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/package-info.java new file mode 100644 index 0000000000000..6b94ac767521e --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/impl/package-info.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Statistics collection for the S3A connector: implementation. + * Not for use by anything outside the hadoop-aws source tree. + */ + +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.statistics.impl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/package-info.java new file mode 100644 index 0000000000000..b74bc3e25286e --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/statistics/package-info.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Statistics collection for the S3A connector: interfaces. + * This is private, though there's a risk that some extension + * points (delegation tokens?) may need access to the internal + * API. Hence the split packaging...with a java 9 module, the + * implementation classes would be declared internal. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.statistics; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java new file mode 100644 index 0000000000000..f1fde7b353515 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java @@ -0,0 +1,1022 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.tools; + +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.PrintStream; +import java.io.Writer; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; +import org.apache.hadoop.fs.s3a.UnknownStoreException; +import org.apache.hadoop.fs.s3a.impl.DirMarkerTracker; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool; +import org.apache.hadoop.fs.shell.CommandFormat; +import org.apache.hadoop.util.DurationInfo; +import org.apache.hadoop.util.ExitUtil; + +import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH; +import static org.apache.hadoop.fs.s3a.Constants.BULK_DELETE_PAGE_SIZE; +import static org.apache.hadoop.fs.s3a.Constants.BULK_DELETE_PAGE_SIZE_DEFAULT; +import static org.apache.hadoop.fs.s3a.Invoker.once; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_INTERRUPTED; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_ACCEPTABLE; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_FOUND; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_SUCCESS; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_USAGE; + +/** + * Audit and S3 bucket for directory markers. + *

    + * This tool does not go anywhere near S3Guard; its scan bypasses any + * metastore as we are explicitly looking for marker objects. + */ +@InterfaceAudience.LimitedPrivate("management tools") +@InterfaceStability.Unstable +public final class MarkerTool extends S3GuardTool { + + private static final Logger LOG = LoggerFactory.getLogger(MarkerTool.class); + + /** + * Name of this tool: {@value}. + */ + public static final String MARKERS = "markers"; + + /** + * Purpose of this tool: {@value}. + */ + public static final String PURPOSE = + "View and manipulate S3 directory markers"; + + /** + * Audit sub-command: {@value}. + */ + public static final String OPT_AUDIT = "audit"; + + /** + * Clean Sub-command: {@value}. + */ + public static final String OPT_CLEAN = "clean"; + + /** + * Audit sub-command: {@value}. + */ + public static final String AUDIT = "-" + OPT_AUDIT; + + /** + * Clean Sub-command: {@value}. + */ + public static final String CLEAN = "-" + OPT_CLEAN; + + /** + * Min number of markers to find: {@value}. + */ + public static final String OPT_MIN = "min"; + + /** + * Max number of markers to find: {@value}. + */ + public static final String OPT_MAX = "max"; + + /** + * Name of a file to save the list of markers to: {@value}. + */ + public static final String OPT_OUT = "out"; + + /** + * Limit of objects to scan: {@value}. + */ + public static final String OPT_LIMIT = "limit"; + + /** + * Only consider markers found in non-authoritative paths + * as failures: {@value}. + */ + public static final String OPT_NONAUTH = "nonauth"; + + /** + * Error text when too few arguments are found. + */ + @VisibleForTesting + static final String E_ARGUMENTS = "Wrong number of arguments: %d"; + + /** + * Constant to use when there is no limit on the number of + * objects listed: {@value}. + *

    + * The value is 0 and not -1 because it allows for the limit to be + * set on the command line {@code -limit 0}. + * The command line parser rejects {@code -limit -1} as the -1 + * is interpreted as the (unknown) option "-1". + */ + public static final int UNLIMITED_LISTING = 0; + + + /** + * Constant to use when there is no minimum number of + * markers: {@value}. + */ + public static final int UNLIMITED_MIN_MARKERS = -1; + + + /** + * Usage string: {@value}. + */ + private static final String USAGE = MARKERS + + " (-" + OPT_AUDIT + + " | -" + OPT_CLEAN + ")" + + " [-" + OPT_MIN + " ]" + + " [-" + OPT_MAX + " ]" + + " [-" + OPT_OUT + " ]" + + " [-" + OPT_LIMIT + " ]" + + " [-" + OPT_NONAUTH + "]" + + " [-" + VERBOSE + "]" + + + " \n" + + "\t" + PURPOSE + "\n\n"; + + /** Will be overridden in run(), but during tests needs to avoid NPEs. */ + private PrintStream out = System.out; + + /** + * Verbosity flag. + */ + private boolean verbose; + + /** + * Store context. + */ + private StoreContext storeContext; + + /** + * Operations during the scan. + */ + private MarkerToolOperations operations; + + /** + * Constructor. + * @param conf configuration + */ + public MarkerTool(final Configuration conf) { + super(conf, + OPT_AUDIT, + OPT_CLEAN, + VERBOSE, + OPT_NONAUTH); + CommandFormat format = getCommandFormat(); + format.addOptionWithValue(OPT_MIN); + format.addOptionWithValue(OPT_MAX); + format.addOptionWithValue(OPT_LIMIT); + format.addOptionWithValue(OPT_OUT); + } + + @Override + public String getUsage() { + return USAGE; + } + + @Override + public String getName() { + return MARKERS; + } + + @Override + public void resetBindings() { + super.resetBindings(); + storeContext = null; + operations = null; + } + + @Override + public int run(final String[] args, final PrintStream stream) + throws ExitUtil.ExitException, Exception { + this.out = stream; + final List parsedArgs; + try { + parsedArgs = parseArgs(args); + } catch (CommandFormat.UnknownOptionException e) { + errorln(getUsage()); + throw new ExitUtil.ExitException(EXIT_USAGE, e.getMessage(), e); + } + if (parsedArgs.size() != 1) { + errorln(getUsage()); + println(out, "Supplied arguments: [" + + String.join(", ", parsedArgs) + + "]"); + throw new ExitUtil.ExitException(EXIT_USAGE, + String.format(E_ARGUMENTS, parsedArgs.size())); + } + // read arguments + CommandFormat command = getCommandFormat(); + verbose = command.getOpt(VERBOSE); + + // minimum number of markers expected + int expectedMin = getOptValue(OPT_MIN, 0); + // max number of markers allowed + int expectedMax = getOptValue(OPT_MAX, 0); + + + // determine the action + boolean audit = command.getOpt(OPT_AUDIT); + boolean clean = command.getOpt(OPT_CLEAN); + if (audit == clean) { + // either both are set or neither are set + // this is equivalent to (not audit xor clean) + errorln(getUsage()); + throw new ExitUtil.ExitException(EXIT_USAGE, + "Exactly one of " + AUDIT + " and " + CLEAN); + } + int limit = getOptValue(OPT_LIMIT, UNLIMITED_LISTING); + final String dir = parsedArgs.get(0); + Path path = new Path(dir); + URI uri = path.toUri(); + if (uri.getPath().isEmpty()) { + // fix up empty URI for better CLI experience + path = new Path(path, "/"); + } + FileSystem fs = path.getFileSystem(getConf()); + boolean nonAuth = command.getOpt(OPT_NONAUTH); + ScanResult result; + try { + result = execute( + new ScanArgsBuilder() + .withSourceFS(fs) + .withPath(path) + .withDoPurge(clean) + .withMinMarkerCount(expectedMin) + .withMaxMarkerCount(expectedMax) + .withLimit(limit) + 
.withNonAuth(nonAuth) + .build()); + } catch (UnknownStoreException ex) { + // bucket doesn't exist. + // replace the stack trace with an error code. + throw new ExitUtil.ExitException(EXIT_NOT_FOUND, + ex.toString(), ex); + } + if (verbose) { + dumpFileSystemStatistics(out); + } + + // and finally see if the output should be saved to a file + String saveFile = command.getOptValue(OPT_OUT); + if (saveFile != null && !saveFile.isEmpty()) { + println(out, "Saving result to %s", saveFile); + try (Writer writer = + new OutputStreamWriter( + new FileOutputStream(saveFile), + StandardCharsets.UTF_8)) { + final List surplus = result.getTracker() + .getSurplusMarkers() + .keySet() + .stream() + .map(p-> p.toString() + "/") + .sorted() + .collect(Collectors.toList()); + IOUtils.writeLines(surplus, "\n", writer); + } + } + + return result.finish(); + } + + /** + * Get the value of an option, or the default if the option + * is unset/empty. + * @param option option key + * @param defVal default + * @return the value to use + */ + private int getOptValue(String option, int defVal) { + CommandFormat command = getCommandFormat(); + String value = command.getOptValue(option); + if (value != null && !value.isEmpty()) { + try { + return Integer.parseInt(value); + } catch (NumberFormatException e) { + throw new ExitUtil.ExitException(EXIT_USAGE, + String.format("Argument for %s is not a number: %s", + option, value)); + } + } else { + return defVal; + } + } + + /** + * Execute the scan/purge. + * + * @param scanArgs@return scan+purge result. + * @throws IOException failure + */ + @VisibleForTesting + ScanResult execute(final ScanArgs scanArgs) + throws IOException { + S3AFileSystem fs = bindFilesystem(scanArgs.getSourceFS()); + + // extract the callbacks needed for the rest of the work + storeContext = fs.createStoreContext(); + operations = fs.createMarkerToolOperations(); + // filesystem policy. + // if the -nonauth option is set, this is used to filter + // out surplus markers from the results. + DirectoryPolicy activePolicy = fs.getDirectoryMarkerPolicy(); + DirectoryPolicy.MarkerPolicy policy = activePolicy + .getMarkerPolicy(); + println(out, "The directory marker policy of %s is \"%s\"", + storeContext.getFsURI(), + policy); + String authPath = storeContext.getConfiguration() + .getTrimmed(AUTHORITATIVE_PATH, ""); + if (policy == DirectoryPolicy.MarkerPolicy.Authoritative) { + // in auth mode, note the auth paths. + println(out, "Authoritative path list is \"%s\"", authPath); + } + // qualify the path + Path path = scanArgs.getPath(); + Path target = path.makeQualified(fs.getUri(), new Path("/")); + // initial safety check: does the path exist? + try { + getFilesystem().getFileStatus(target); + } catch (UnknownStoreException ex) { + // bucket doesn't exist. + // replace the stack trace with an error code. + throw new ExitUtil.ExitException(EXIT_NOT_FOUND, + ex.toString(), ex); + + } catch (FileNotFoundException ex) { + throw new ExitUtil.ExitException(EXIT_NOT_FOUND, + "Not found: " + target, ex); + } + + // the default filter policy is that all entries should be deleted + DirectoryPolicy filterPolicy; + if (scanArgs.isNonAuth()) { + filterPolicy = new DirectoryPolicyImpl( + DirectoryPolicy.MarkerPolicy.Authoritative, + fs::allowAuthoritative); + } else { + filterPolicy = null; + } + int minMarkerCount = scanArgs.getMinMarkerCount(); + int maxMarkerCount = scanArgs.getMaxMarkerCount(); + if (minMarkerCount > maxMarkerCount) { + // swap min and max if they are wrong. 
+ // this is to ensure any test scripts written to work around + // HADOOP-17332 and min/max swapping continue to work. + println(out, "Swapping -min (%d) and -max (%d) values", + minMarkerCount, maxMarkerCount); + int m = minMarkerCount; + minMarkerCount = maxMarkerCount; + maxMarkerCount = m; + } + ScanResult result = scan(target, + scanArgs.isDoPurge(), + minMarkerCount, + maxMarkerCount, + scanArgs.getLimit(), + filterPolicy); + return result; + } + + /** + * Result of the scan operation. + */ + public static final class ScanResult { + + /** + * Exit code to return if an exception was not raised. + */ + private int exitCode; + + /** + * Text to include if raising an exception. + */ + private String exitText = ""; + + /** + * Count of all markers found. + */ + private int totalMarkerCount; + + /** + * Count of all markers found after excluding + * any from a [-nonauth] qualification. + */ + private int filteredMarkerCount; + + /** + * The tracker. + */ + private DirMarkerTracker tracker; + + /** + * Scan summary. + */ + private MarkerPurgeSummary purgeSummary; + + private ScanResult() { + } + + @Override + public String toString() { + return "ScanResult{" + + "exitCode=" + exitCode + + ", exitText=" + exitText + + ", totalMarkerCount=" + totalMarkerCount + + ", filteredMarkerCount=" + filteredMarkerCount + + ", tracker=" + tracker + + ", purgeSummary=" + purgeSummary + + '}'; + } + + /** Exit code to report. */ + public int getExitCode() { + return exitCode; + } + + /** Tracker which did the scan. */ + public DirMarkerTracker getTracker() { + return tracker; + } + + /** Summary of purge. Null if none took place. */ + public MarkerPurgeSummary getPurgeSummary() { + return purgeSummary; + } + + public int getTotalMarkerCount() { + return totalMarkerCount; + } + + public int getFilteredMarkerCount() { + return filteredMarkerCount; + } + + /** + * Throw an exception if the exit code is non-zero. + * @return 0 if everything is good. + * @throws ExitUtil.ExitException if code != 0 + */ + public int finish() throws ExitUtil.ExitException { + if (exitCode != 0) { + throw new ExitUtil.ExitException(exitCode, exitText); + } + return 0; + } + } + + /** + * Do the scan/purge. + * @param path path to scan. + * @param doPurge purge rather than just scan/audit? + * @param minMarkerCount min marker count (ignored on purge) + * @param maxMarkerCount max marker count (ignored on purge) + * @param limit limit of files to scan; 0 for 'unlimited' + * @param filterPolicy filter policy on a nonauth scan; may be null + * @return result. + * @throws IOException IO failure + * @throws ExitUtil.ExitException explicitly raised failure + */ + @Retries.RetryTranslated + private ScanResult scan( + final Path path, + final boolean doPurge, + final int minMarkerCount, + final int maxMarkerCount, + final int limit, + final DirectoryPolicy filterPolicy) + throws IOException, ExitUtil.ExitException { + + // safety check: min and max are correctly ordered at this point. + Preconditions.checkArgument(minMarkerCount <= maxMarkerCount, + "The min marker count of %d is greater than the max value of %d", + minMarkerCount, maxMarkerCount); + + ScanResult result = new ScanResult(); + + // Mission Accomplished + result.exitCode = EXIT_SUCCESS; + // Now do the work. 
+ DirMarkerTracker tracker = new DirMarkerTracker(path, true); + result.tracker = tracker; + boolean completed; + try (DurationInfo ignored = + new DurationInfo(LOG, "marker scan %s", path)) { + completed = scanDirectoryTree(path, tracker, limit); + } + int objectsFound = tracker.getObjectsFound(); + println(out, "Listed %d object%s under %s%n", + objectsFound, + suffix(objectsFound), + path); + // scan done. what have we got? + Map surplusMarkers + = tracker.getSurplusMarkers(); + Map leafMarkers + = tracker.getLeafMarkers(); + // determine marker count + int markerCount = surplusMarkers.size(); + result.totalMarkerCount = markerCount; + result.filteredMarkerCount = markerCount; + if (markerCount == 0) { + println(out, "No surplus directory markers were found under %s", path); + } else { + println(out, "Found %d surplus directory marker%s under %s", + markerCount, + suffix(markerCount), + path); + + for (Path markers : surplusMarkers.keySet()) { + println(out, " %s/", markers); + } + } + if (!leafMarkers.isEmpty()) { + println(out, "Found %d empty directory 'leaf' marker%s under %s", + leafMarkers.size(), + suffix(leafMarkers.size()), + path); + for (Path markers : leafMarkers.keySet()) { + println(out, " %s/", markers); + } + println(out, "These are required to indicate empty directories"); + } + + if (doPurge) { + // clean: remove the markers, do not worry about their + // presence when reporting success/failure + int deletePageSize = storeContext.getConfiguration() + .getInt(BULK_DELETE_PAGE_SIZE, + BULK_DELETE_PAGE_SIZE_DEFAULT); + result.purgeSummary = purgeMarkers(tracker, deletePageSize); + } else { + // this is an audit, so validate the marker count + + if (filterPolicy != null) { + // if a filter policy is supplied, filter out all markers + // under the auth path + List allowed = tracker.removeAllowedMarkers(filterPolicy); + int allowedMarkers = allowed.size(); + println(out, "%nIgnoring %d marker%s in authoritative paths", + allowedMarkers, suffix(allowedMarkers)); + if (verbose) { + allowed.forEach(p -> println(out, p.toString())); + } + // recalculate the marker size + markerCount = surplusMarkers.size(); + result.filteredMarkerCount = markerCount; + } + if (markerCount < minMarkerCount || markerCount > maxMarkerCount) { + // failure + return failScan(result, EXIT_NOT_ACCEPTABLE, + "Marker count %d out of range " + + "[%d - %d]", + markerCount, minMarkerCount, maxMarkerCount); + } + } + + // now one little check for whether a limit was reached. + if (!completed) { + failScan(result, EXIT_INTERRUPTED, + "Listing limit (%d) reached before completing the scan", limit); + } + return result; + } + + /** + * Fail the scan; print the formatted error and update the result. + * @param result result to update + * @param code Exit code + * @param message Error message + * @param args arguments for the error message + * @return scan result + */ + private ScanResult failScan( + ScanResult result, + int code, + String message, + Object...args) { + String text = String.format(message, args); + result.exitCode = code; + result.exitText = text; + return result; + } + + /** + * Suffix for plurals. + * @param size size to generate a suffix for + * @return "" or "s", depending on size + */ + private String suffix(final int size) { + return size == 1 ? "" : "s"; + } + + /** + * Scan a directory tree. 
+ * @param path path to scan + * @param tracker tracker to update + * @param limit limit of files to scan; -1 for 'unlimited' + * @return true if the scan completedly scanned the entire tree + * @throws IOException IO failure + */ + @Retries.RetryTranslated + private boolean scanDirectoryTree( + final Path path, + final DirMarkerTracker tracker, + final int limit) throws IOException { + + int count = 0; + boolean result = true; + RemoteIterator listing = operations + .listObjects(path, storeContext.pathToKey(path)); + while (listing.hasNext()) { + count++; + S3AFileStatus status = listing.next(); + Path statusPath = status.getPath(); + S3ALocatedFileStatus locatedStatus = new S3ALocatedFileStatus( + status, null); + String key = storeContext.pathToKey(statusPath); + if (status.isDirectory()) { + if (verbose) { + println(out, " Directory Marker %s/", key); + } + LOG.debug("{}", key); + tracker.markerFound(statusPath, + key + "/", + locatedStatus); + } else { + tracker.fileFound(statusPath, + key, + locatedStatus); + } + if ((count % 1000) == 0) { + println(out, "Scanned %,d objects", count); + } + if (limit > 0 && count >= limit) { + println(out, "Limit of scan reached - %,d object%s", + limit, suffix(limit)); + result = false; + break; + } + } + LOG.debug("Listing summary {}", listing); + if (verbose) { + println(out, "%nListing statistics:%n %s%n", + ioStatisticsSourceToString(listing)); + } + return result; + } + + /** + * Result of a call of {@link #purgeMarkers(DirMarkerTracker, int)}; + * included in {@link ScanResult} so must share visibility. + */ + public static final class MarkerPurgeSummary { + + /** Number of markers deleted. */ + private int markersDeleted; + + /** Number of delete requests issued. */ + private int deleteRequests; + + /** + * Total duration of delete requests. + * If this is ever parallelized, this will + * be greater than the elapsed time of the + * operation. + */ + private long totalDeleteRequestDuration; + + @Override + public String toString() { + return "MarkerPurgeSummary{" + + "markersDeleted=" + markersDeleted + + ", deleteRequests=" + deleteRequests + + ", totalDeleteRequestDuration=" + totalDeleteRequestDuration + + '}'; + } + + + /** + * Count of markers deleted. + * @return a number, zero when prune==false. + */ + int getMarkersDeleted() { + return markersDeleted; + } + + /** + * Count of bulk delete requests issued. + * @return count of calls made to S3. + */ + int getDeleteRequests() { + return deleteRequests; + } + + /** + * Total duration of delete requests. + * @return a time interval in millis. + */ + long getTotalDeleteRequestDuration() { + return totalDeleteRequestDuration; + } + } + + /** + * Purge the markers. + * @param tracker tracker with the details + * @param deletePageSize page size of deletes + * @return summary + * @throws IOException IO failure + */ + @Retries.RetryTranslated + private MarkerPurgeSummary purgeMarkers( + final DirMarkerTracker tracker, + final int deletePageSize) + throws MultiObjectDeleteException, AmazonClientException, IOException { + + MarkerPurgeSummary summary = new MarkerPurgeSummary(); + // we get a map of surplus markers to delete. + Map markers + = tracker.getSurplusMarkers(); + int size = markers.size(); + // build a list from the strings in the map + List collect = + markers.values().stream() + .map(p -> new DeleteObjectsRequest.KeyVersion(p.getKey())) + .collect(Collectors.toList()); + // build an array list for ease of creating the lists of + // keys in each page through the subList() method. 
+ List markerKeys = + new ArrayList<>(collect); + + // now randomize. Why so? if the list spans multiple S3 partitions, + // it should reduce the IO load on each part. + Collections.shuffle(markerKeys); + int pages = size / deletePageSize; + if (size % deletePageSize > 0) { + pages += 1; + } + if (verbose) { + println(out, "%n%d marker%s to delete in %d page%s of %d keys/page", + size, suffix(size), + pages, suffix(pages), + deletePageSize); + } + DurationInfo durationInfo = new DurationInfo(LOG, "Deleting markers"); + int start = 0; + while (start < size) { + // end is one past the end of the page + int end = Math.min(start + deletePageSize, size); + List page = markerKeys.subList(start, + end); + List undeleted = new ArrayList<>(); + once("Remove S3 Keys", + tracker.getBasePath().toString(), () -> + operations.removeKeys(page, true, undeleted, null, false)); + summary.deleteRequests++; + // and move to the start of the next page + start = end; + } + durationInfo.close(); + summary.totalDeleteRequestDuration = durationInfo.value(); + summary.markersDeleted = size; + return summary; + } + + public boolean isVerbose() { + return verbose; + } + + public void setVerbose(final boolean verbose) { + this.verbose = verbose; + } + + /** + * Execute the marker tool, with no checks on return codes. + * + * @param scanArgs set of args for the scanner. + * @return the result + */ + @SuppressWarnings("IOResourceOpenedButNotSafelyClosed") + public static MarkerTool.ScanResult execMarkerTool( + ScanArgs scanArgs) throws IOException { + MarkerTool tool = new MarkerTool(scanArgs.getSourceFS().getConf()); + tool.setVerbose(LOG.isDebugEnabled()); + + return tool.execute(scanArgs); + } + + /** + * Arguments for the scan. + *

    + * Uses a builder/argument object because too many arguments were + * being created and it was making maintenance harder. + */ + public static final class ScanArgs { + + /** Source FS; must be or wrap an S3A FS. */ + private final FileSystem sourceFS; + + /** Path to scan. */ + private final Path path; + + /** Purge? */ + private final boolean doPurge; + + /** Min marker count (ignored on purge). */ + private final int minMarkerCount; + + /** Max marker count (ignored on purge). */ + private final int maxMarkerCount; + + /** Limit of files to scan; 0 for 'unlimited'. */ + private final int limit; + + /** Consider only markers in nonauth paths as errors. */ + private final boolean nonAuth; + + /** + * @param sourceFS source FS; must be or wrap an S3A FS. + * @param path path to scan. + * @param doPurge purge? + * @param minMarkerCount min marker count (ignored on purge) + * @param maxMarkerCount max marker count (ignored on purge) + * @param limit limit of files to scan; 0 for 'unlimited' + * @param nonAuth consider only markers in nonauth paths as errors + */ + private ScanArgs(final FileSystem sourceFS, + final Path path, + final boolean doPurge, + final int minMarkerCount, + final int maxMarkerCount, + final int limit, + final boolean nonAuth) { + this.sourceFS = sourceFS; + this.path = path; + this.doPurge = doPurge; + this.minMarkerCount = minMarkerCount; + this.maxMarkerCount = maxMarkerCount; + this.limit = limit; + this.nonAuth = nonAuth; + } + + FileSystem getSourceFS() { + return sourceFS; + } + + Path getPath() { + return path; + } + + boolean isDoPurge() { + return doPurge; + } + + int getMinMarkerCount() { + return minMarkerCount; + } + + int getMaxMarkerCount() { + return maxMarkerCount; + } + + int getLimit() { + return limit; + } + + boolean isNonAuth() { + return nonAuth; + } + } + + /** + * Builder of the scan arguments. + */ + public static final class ScanArgsBuilder { + + /** Source FS; must be or wrap an S3A FS. */ + private FileSystem sourceFS; + + /** Path to scan. */ + private Path path; + + /** Purge? */ + private boolean doPurge = false; + + /** Min marker count (ignored on purge). */ + private int minMarkerCount = 0; + + /** Max marker count (ignored on purge). */ + private int maxMarkerCount = 0; + + /** Limit of files to scan; 0 for 'unlimited'. */ + private int limit = UNLIMITED_LISTING; + + /** Consider only markers in nonauth paths as errors. */ + private boolean nonAuth = false; + + /** Source FS; must be or wrap an S3A FS. */ + public ScanArgsBuilder withSourceFS(final FileSystem source) { + this.sourceFS = source; + return this; + } + + /** Path to scan. */ + public ScanArgsBuilder withPath(final Path p) { + this.path = p; + return this; + } + + /** Purge? */ + public ScanArgsBuilder withDoPurge(final boolean d) { + this.doPurge = d; + return this; + } + + /** Min marker count (ignored on purge). */ + public ScanArgsBuilder withMinMarkerCount(final int min) { + this.minMarkerCount = min; + return this; + } + + /** Max marker count (ignored on purge). */ + public ScanArgsBuilder withMaxMarkerCount(final int max) { + this.maxMarkerCount = max; + return this; + } + + /** Limit of files to scan; 0 for 'unlimited'. */ + public ScanArgsBuilder withLimit(final int l) { + this.limit = l; + return this; + } + + /** Consider only markers in nonauth paths as errors. */ + public ScanArgsBuilder withNonAuth(final boolean b) { + this.nonAuth = b; + return this; + } + + /** + * Build the actual argument instance. 
+ * @return the arguments to pass in + */ + public ScanArgs build() { + return new ScanArgs(sourceFS, + path, + doPurge, + minMarkerCount, + maxMarkerCount, + limit, + nonAuth); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java new file mode 100644 index 0000000000000..9ab7636d6c99f --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.tools; + +import java.io.IOException; +import java.util.List; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.DeleteObjectsResult; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; + +import org.apache.hadoop.fs.InvalidRequestException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.Retries; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; + +/** + * Operations which must be offered by the store for {@link MarkerTool}. + * These are a proper subset of {@code OperationCallbacks}; this interface + * strips down those provided to the tool. + */ +public interface MarkerToolOperations { + + /** + * Create an iterator over objects in S3 only; S3Guard + * is not involved. + * The listing includes the key itself, if found. + * @param path path of the listing. + * @param key object key + * @return iterator with the first listing completed. + * @throws IOException failure. + */ + @Retries.RetryTranslated + RemoteIterator listObjects( + Path path, + String key) + throws IOException; + + /** + * Remove keys from the store, updating the metastore on a + * partial delete represented as a MultiObjectDeleteException failure by + * deleting all those entries successfully deleted and then rethrowing + * the MultiObjectDeleteException. + * @param keysToDelete collection of keys to delete on the s3-backend. + * if empty, no request is made of the object store. + * @param deleteFakeDir indicates whether this is for deleting fake dirs. + * @param undeletedObjectsOnFailure List which will be built up of all + * files that were not deleted. This happens even as an exception + * is raised. + * @param operationState bulk operation state + * @param quiet should a bulk query be quiet, or should its result list + * all deleted keys + * @return the deletion result if a multi object delete was invoked + * and it returned without a failure, else null. 
+ * @throws InvalidRequestException if the request was rejected due to + * a mistaken attempt to delete the root directory. + * @throws MultiObjectDeleteException one or more of the keys could not + * be deleted in a multiple object delete operation. + * @throws AmazonClientException amazon-layer failure. + * @throws IOException other IO Exception. + */ + @Retries.RetryMixed + DeleteObjectsResult removeKeys( + List keysToDelete, + boolean deleteFakeDir, + List undeletedObjectsOnFailure, + BulkOperationState operationState, + boolean quiet) + throws MultiObjectDeleteException, AmazonClientException, + IOException; + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java new file mode 100644 index 0000000000000..d14bb6b1d8ebb --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.tools; + +import java.io.IOException; +import java.util.List; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.DeleteObjectsResult; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.impl.OperationCallbacks; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; + +/** + * Implement the marker tool operations by forwarding to the + * {@link OperationCallbacks} instance provided in the constructor. + */ +public class MarkerToolOperationsImpl implements MarkerToolOperations { + + private final OperationCallbacks operationCallbacks; + + /** + * Constructor. 
+ * @param operations implementation of the operations + */ + public MarkerToolOperationsImpl(final OperationCallbacks operations) { + this.operationCallbacks = operations; + } + + @Override + public RemoteIterator listObjects(final Path path, + final String key) + throws IOException { + return operationCallbacks.listObjects(path, key); + } + + @Override + public DeleteObjectsResult removeKeys( + final List keysToDelete, + final boolean deleteFakeDir, + final List undeletedObjectsOnFailure, + final BulkOperationState operationState, + final boolean quiet) + throws MultiObjectDeleteException, AmazonClientException, IOException { + return operationCallbacks.removeKeys(keysToDelete, deleteFakeDir, + undeletedObjectsOnFailure, operationState, quiet); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/package-info.java new file mode 100644 index 0000000000000..cb3a3749b658c --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/package-info.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * S3A Command line tools independent of S3Guard. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.fs.s3a.tools; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java index 84e4a6768f976..df02b879f3e05 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java @@ -22,8 +22,8 @@ import java.net.URISyntaxException; import java.util.Objects; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader index d16846b25b52f..68a4c79250611 100644 --- a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader +++ b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploader @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -org.apache.hadoop.fs.s3a.S3AMultipartUploader +org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader diff --git a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory b/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory deleted file mode 100644 index 2e4bc241d0c94..0000000000000 --- a/hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.MultipartUploaderFactory +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-org.apache.hadoop.fs.s3a.S3AMultipartUploader$Factory diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md index 3071754836c53..048f08cf7c0f0 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md @@ -20,6 +20,27 @@ This document covers the architecture and implementation details of the S3A comm For information on using the committers, see [the S3A Committers](./committer.html). +### January 2021 Update + +Now that S3 is fully consistent, problems related to inconsistent +directory listings have gone. However the rename problem exists: committing +work by renaming directories is unsafe as well as horribly slow. + +This architecture document, and the committers, were written at a time +when S3 was inconsistent. The two committers addressed this problem differently + +* Staging Committer: rely on a cluster HDFS filesystem for safely propagating + the lists of files to commit from workers to the job manager/driver. +* Magic Committer: require S3Guard to offer consistent directory listings + on the object store. + +With consistent S3, the Magic Committer can be safely used with any S3 bucket. +The choice of which to use, then, is matter for experimentation. + +This architecture document was written in 2017, a time when S3 was only +consistent when an extra consistency layer such as S3Guard was used. +The document indicates where requirements/constraints which existed then +are now obsolete. ## Problem: Efficient, reliable commits of work to consistent S3 buckets @@ -49,10 +70,10 @@ can be executed server-side, but as it does not complete until the in-cluster copy has completed, it takes time proportional to the amount of data. The rename overhead is the most visible issue, but it is not the most dangerous. -That is the fact that path listings have no consistency guarantees, and may -lag the addition or deletion of files. -If files are not listed, the commit operation will *not* copy them, and -so they will not appear in the final output. +That is the fact that until late 2020, path listings had no consistency guarantees, +and may have lagged the addition or deletion of files. +If files were not listed, the commit operation would *not* copy them, and +so they would not appear in the final output. The solution to this problem is closely coupled to the S3 protocol itself: delayed completion of multi-part PUT operations @@ -242,7 +263,7 @@ def commitTask(fs, jobAttemptPath, taskAttemptPath, dest): On a genuine filesystem this is an `O(1)` directory rename. -On an object store with a mimiced rename, it is `O(data)` for the copy, +On an object store with a mimicked rename, it is `O(data)` for the copy, along with overhead for listing and deleting all files (For S3, that's `(1 + files/500)` lists, and the same number of delete calls. @@ -476,7 +497,7 @@ def needsTaskCommit(fs, jobAttemptPath, taskAttemptPath, dest): def commitTask(fs, jobAttemptPath, taskAttemptPath, dest): if fs.exists(taskAttemptPath) : - mergePathsV2(fs. taskAttemptPath, dest) + mergePathsV2(fs, taskAttemptPath, dest) ``` ### v2 Task Abort @@ -828,6 +849,8 @@ commit sequence in `Task.done()`, when `talkToAMTGetPermissionToCommit()` # Requirements of an S3A Committer +The design requirements of the S3A committer were + 1. 
Support an eventually consistent S3 object store as a reliable direct destination of work through the S3A filesystem client. 1. Efficient: implies no rename, and a minimal amount of delay in the job driver's @@ -841,6 +864,7 @@ the job, and any previous incompleted jobs. 1. Security: not to permit privilege escalation from other users with write access to the same file system(s). + ## Features of S3 and the S3A Client @@ -852,8 +876,8 @@ MR committer algorithms have significant performance problems. 1. Single-object renames are implemented as a copy and delete sequence. 1. COPY is atomic, but overwrites cannot be prevented. -1. Amazon S3 is eventually consistent on listings, deletes and updates. -1. Amazon S3 has create consistency, however, the negative response of a HEAD/GET +1. [Obsolete] Amazon S3 is eventually consistent on listings, deletes and updates. +1. [Obsolete] Amazon S3 has create consistency, however, the negative response of a HEAD/GET performed on a path before an object was created can be cached, unintentionally creating a create inconsistency. The S3A client library does perform such a check, on `create()` and `rename()` to check the state of the destination path, and @@ -872,11 +896,12 @@ data, with the `S3ABlockOutputStream` of HADOOP-13560 uploading written data as parts of a multipart PUT once the threshold set in the configuration parameter `fs.s3a.multipart.size` (default: 100MB). -[S3Guard](./s3guard.html) adds an option of consistent view of the filesystem +[S3Guard](./s3guard.html) added an option of consistent view of the filesystem to all processes using the shared DynamoDB table as the authoritative store of -metadata. Some S3-compatible object stores are fully consistent; the -proposed algorithm is designed to work with such object stores without the -need for any DynamoDB tables. +metadata. +The proposed algorithm was designed to work with such object stores without the +need for any DynamoDB tables. Since AWS S3 became consistent in 2020, this +means that they will work directly with the store. ## Related work: Spark's `DirectOutputCommitter` @@ -903,7 +928,7 @@ not be a problem. IBM's [Stocator](https://github.com/SparkTC/stocator) can transform indirect writes of V1/V2 committers into direct writes to the destination directory. -Hpw does it do this? It's a special Hadoop `FileSystem` implementation which +How does it do this? It's a special Hadoop `FileSystem` implementation which recognizes writes to `_temporary` paths and translate them to writes to the base directory. As well as translating the write operation, it also supports a `getFileStatus()` call on the original path, returning details on the file @@ -969,7 +994,7 @@ It is that fact, that a different process may perform different parts of the upload, which make this algorithm viable. -## The Netfix "Staging" committer +## The Netflix "Staging" committer Ryan Blue, of Netflix, has submitted an alternate committer, one which has a number of appealing features @@ -1081,7 +1106,7 @@ output reaches the job commit. Similarly, if a task is aborted, temporary output on the local FS is removed. If a task dies while the committer is running, it is possible for data to be -eft on the local FS or as unfinished parts in S3. +left on the local FS or as unfinished parts in S3. Unfinished upload parts in S3 are not visible to table readers and are cleaned up following the rules in the target bucket's life-cycle policy. 
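Since cleanup of those unfinished parts is left to the bucket's lifecycle rules, the sketch below shows what such a rule might look like when created through the AWS SDK for Java v1 (the SDK generation bundled by hadoop-aws). This is an illustrative sketch only: the bucket name and the seven-day window are assumptions, not values taken from this document.

```java
import java.util.Collections;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortIncompleteMultipartUpload;
import com.amazonaws.services.s3.model.BucketLifecycleConfiguration;
import com.amazonaws.services.s3.model.lifecycle.LifecycleFilter;
import com.amazonaws.services.s3.model.lifecycle.LifecyclePrefixPredicate;

public class AbortStaleUploadsRule {
  public static void main(String[] args) {
    String bucket = "example-bucket";  // illustrative assumption
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    // Abort any multipart upload that has not been completed within 7 days.
    BucketLifecycleConfiguration.Rule rule = new BucketLifecycleConfiguration.Rule()
        .withId("abort-stale-multipart-uploads")
        .withFilter(new LifecycleFilter(new LifecyclePrefixPredicate("")))  // whole bucket
        .withAbortIncompleteMultipartUpload(
            new AbortIncompleteMultipartUpload().withDaysAfterInitiation(7))
        .withStatus(BucketLifecycleConfiguration.ENABLED);
    s3.setBucketLifecycleConfiguration(bucket,
        new BucketLifecycleConfiguration(Collections.singletonList(rule)));
  }
}
```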
@@ -1246,8 +1271,8 @@ for parallel committing of work, including all the error handling based on the Netflix experience. It differs in that it directly streams data to S3 (there is no staging), -and it also stores the lists of pending commits in S3 too. That mandates -consistent metadata on S3, which S3Guard provides. +and it also stores the lists of pending commits in S3 too. It +requires a consistent S3 store. ### Core concept: A new/modified output stream for delayed PUT commits @@ -1312,6 +1337,16 @@ On `close()`, summary data would be written to the file `/results/latest/__magic/job400_1/task_01_01/latest.orc.lzo.pending`. This would contain the upload ID and all the parts and etags of uploaded data. +A marker file is also created, so that code which verifies that a newly created file +exists does not fail. +1. These marker files are zero bytes long. +1. They declare the full length of the final file in the HTTP header + `x-hadoop-s3a-magic-data-length`. +1. A call to `getXAttr("header.x-hadoop-s3a-magic-data-length")` will return a + string containing the number of bytes in the data uploaded. + +This is needed so that the Spark write-tracking code can report how much data +has been created. #### Task commit @@ -1480,7 +1515,7 @@ The time to commit a job will be `O(files/threads)` Every `.pendingset` file in the job attempt directory must be loaded, and a PUT request issued for every incomplete upload listed in the files. -Note that it is the bulk listing of all children which is where full consistency +[Obsolete] Note that it is the bulk listing of all children which is where full consistency is required. If instead, the list of files to commit could be returned from tasks to the job committer, as the Spark commit protocol allows, it would be possible to commit data to an inconsistent object store. @@ -1525,7 +1560,7 @@ commit algorithms. 1. It is possible to create more than one client writing to the same destination file within the same S3A client/task, either sequentially or in parallel. -1. Even with a consistent metadata store, if a job overwrites existing +1. [Obsolete] Even with a consistent metadata store, if a job overwrites existing files, then old data may still be visible to clients reading the data, until the update has propagated to all replicas of the data. @@ -1538,7 +1573,7 @@ all files in the destination directory which where not being overwritten. for any purpose other than for the storage of pending commit data. 1. Unless extra code is added to every FS operation, it will still be possible -to manipulate files under the `__magic` tree. That's not bad, it just potentially +to manipulate files under the `__magic` tree. That's not bad, just potentially confusing. 1. As written data is not materialized until the commit, it will not be possible @@ -1693,14 +1728,6 @@ base for relative paths created underneath it. The committers can only be tested against an S3-compatible object store. -Although a consistent object store is a requirement for a production deployment -of the magic committer an inconsistent one has appeared to work during testing, simply by -adding some delays to the operations: a task commit does not succeed until -all the objects which it has PUT are visible in the LIST operation. Assuming -that further listings from the same process also show the objects, the job -committer will be able to list and commit the uploads. 
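The magic-committer marker files described above declare the length of the uploaded data in the `x-hadoop-s3a-magic-data-length` header, which S3A surfaces through `getXAttr()`. A minimal sketch of reading it follows; the path is an illustrative assumption for a file that was written through a `__magic` path and so left a zero-byte marker behind.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MagicMarkerLength {
  public static void main(String[] args) throws Exception {
    // Assumed location of a zero-byte marker object carrying the length header.
    Path marker = new Path("s3a://example-bucket/results/latest/latest.orc.lzo");
    FileSystem fs = marker.getFileSystem(new Configuration());
    // S3A exposes object headers as extended attributes under the "header." prefix.
    byte[] raw = fs.getXAttr(marker, "header.x-hadoop-s3a-magic-data-length");
    long declared = Long.parseLong(new String(raw, StandardCharsets.UTF_8).trim());
    System.out.println("Declared length of uploaded data: " + declared + " bytes");
  }
}
```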
- - The committers have some unit tests, and integration tests based on the protocol integration test lifted from `org.apache.hadoop.mapreduce.lib.output.TestFileOutputCommitter` to test various state transitions of the commit mechanism has been extended @@ -1766,7 +1793,8 @@ tree. Alternatively, the fact that Spark tasks provide data to the job committer on their completion means that a list of pending PUT commands could be built up, with the commit operations being executed by an S3A-specific implementation of the `FileCommitProtocol`. -As noted earlier, this may permit the requirement for a consistent list operation + +[Obsolete] As noted earlier, this may permit the requirement for a consistent list operation to be bypassed. It would still be important to list what was being written, as it is needed to aid aborting work in failed tasks, but the list of files created by successful tasks could be passed directly from the task to committer, @@ -1890,9 +1918,6 @@ bandwidth and the data upload bandwidth. No use is made of the cluster filesystem; there are no risks there. -A consistent store is required, which, for Amazon's infrastructure, means S3Guard. -This is covered below. - A malicious user with write access to the `__magic` directory could manipulate or delete the metadata of pending uploads, or potentially inject new work int the commit. Having access to the `__magic` directory implies write access @@ -1900,13 +1925,12 @@ to the parent destination directory: a malicious user could just as easily manipulate the final output, without needing to attack the committer's intermediate files. - ### Security Risks of all committers #### Visibility -* If S3Guard is used for storing metadata, then the metadata is visible to +[Obsolete] If S3Guard is used for storing metadata, then the metadata is visible to all users with read access. A malicious user with write access could delete entries of newly generated files, so they would not be visible. @@ -1941,7 +1965,7 @@ any of the text fields, script which could then be executed in some XSS attack. We may wish to consider sanitizing this data on load. * Paths in tampered data could be modified in an attempt to commit an upload across -an existing file, or the MPU ID alterated to prematurely commit a different upload. +an existing file, or the MPU ID altered to prematurely commit a different upload. These attempts will not going to succeed, because the destination path of the upload is declared on the initial POST to initiate the MPU, and operations associated with the MPU must also declare the path: if the path and diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md index dee77b62e6a97..6cd847b4abb67 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md @@ -26,9 +26,29 @@ and reliable commitment of output to S3. For details on their internal design, see [S3A Committers: Architecture and Implementation](./committer_architecture.html). +### January 2021 Update -## Introduction: The Commit Problem +Now that S3 is fully consistent, problems related to inconsistent directory +listings have gone. However the rename problem exists: committing work by +renaming directories is unsafe as well as horribly slow. + +This architecture document, and the committers, were written at a time when S3 +was inconsistent. 
The two committers addressed this problem differently + +* Staging Committer: rely on a cluster HDFS filesystem for safely propagating + the lists of files to commit from workers to the job manager/driver. +* Magic Committer: require S3Guard to offer consistent directory listings on the + object store. + +With consistent S3, the Magic Committer can be safely used with any S3 bucket. +The choice of which to use, then, is matter for experimentation. +This document was written in 2017, a time when S3 was only +consistent when an extra consistency layer such as S3Guard was used. The +document indicates where requirements/constraints which existed then are now +obsolete. + +## Introduction: The Commit Problem Apache Hadoop MapReduce (and behind the scenes, Apache Spark) often write the output of their work to filesystems @@ -50,21 +70,18 @@ or it is at the destination, -in which case the rename actually succeeded. **The S3 object store and the `s3a://` filesystem client cannot meet these requirements.* -1. Amazon S3 has inconsistent directory listings unless S3Guard is enabled. -1. The S3A mimics `rename()` by copying files and then deleting the originals. +Although S3A is (now) consistent, the S3A client still mimics `rename()` +by copying files and then deleting the originals. This can fail partway through, and there is nothing to prevent any other process in the cluster attempting a rename at the same time. As a result, -* Files my not be listed, hence not renamed into place. -* Deleted files may still be discovered, confusing the rename process to the point -of failure. * If a rename fails, the data is left in an unknown state. * If more than one process attempts to commit work simultaneously, the output directory may contain the results of both processes: it is no longer an exclusive operation. -*. While S3Guard may deliver the listing consistency, commit time is still +*. Commit time is still proportional to the amount of data created. It still can't handle task failure. **Using the "classic" `FileOutputCommmitter` to commit work to Amazon S3 risks @@ -163,10 +180,8 @@ and restarting the job. whose output is in the job attempt directory, *and only rerunning all uncommitted tasks*. -None of this algorithm works safely or swiftly when working with "raw" AWS S3 storage: -* Directory listing can be inconsistent: the tasks and jobs may not list all work to -be committed. -* Renames go from being fast, atomic operations to slow operations which can fail partway through. +This algorithm does not works safely or swiftly with AWS S3 storage because +tenames go from being fast, atomic operations to slow operations which can fail partway through. This then is the problem which the S3A committers address: @@ -341,12 +356,11 @@ task commit. However, it has extra requirements of the filesystem -1. It requires a consistent object store, which for Amazon S3, -means that [S3Guard](./s3guard.html) must be enabled. For third-party stores, -consult the documentation. +1. [Obsolete] It requires a consistent object store. 1. The S3A client must be configured to recognize interactions with the magic directories and treat them specially. +Now that Amazon S3 is consistent, the magic committer is enabled by default. It's also not been field tested to the extent of Netflix's committer; consider it the least mature of the committers. @@ -358,14 +372,15 @@ it the least mature of the committers. Partitioned Committer. Make sure you have enough hard disk capacity for all staged data. Do not use it in other situations. 
-1. If you know that your object store is consistent, or that the processes -writing data use S3Guard, use the Magic Committer for higher performance -writing of large amounts of data. +1. If you do not have a shared cluster store: use the Magic Committer. + +1. If you are writing large amounts of data: use the Magic Committer. 1. Otherwise: use the directory committer, making sure you have enough hard disk capacity for all staged data. -Put differently: start with the Directory Committer. +Now that S3 is consistent, there are fewer reasons not to use the Magic Committer. +Experiment with both to see which works best for your work. ## Switching to an S3A Committer @@ -499,9 +514,6 @@ performance. ### FileSystem client setup -1. Use a *consistent* S3 object store. For Amazon S3, this means enabling -[S3Guard](./s3guard.html). For S3-compatible filesystems, consult the filesystem -documentation to see if it is consistent, hence compatible "out of the box". 1. Turn the magic on by `fs.s3a.committer.magic.enabled"` ```xml @@ -514,8 +526,6 @@ documentation to see if it is consistent, hence compatible "out of the box".
    ``` -*Do not use the Magic Committer on an inconsistent S3 object store. For -Amazon S3, that means S3Guard must *always* be enabled. ### Enabling the committer @@ -530,25 +540,32 @@ Amazon S3, that means S3Guard must *always* be enabled. Conflict management is left to the execution engine itself. -## Committer Configuration Options +## Common Committer Options + +| Option | Meaning | Default | +|--------|---------|---------| +| `mapreduce.fileoutputcommitter.marksuccessfuljobs` | Write a `_SUCCESS` file on the successful completion of the job. | `true` | +| `fs.s3a.buffer.dir` | Local filesystem directory for data being written and/or staged. | `${hadoop.tmp.dir}/s3a` | +| `fs.s3a.committer.magic.enabled` | Enable "magic committer" support in the filesystem. | `false` | +| `fs.s3a.committer.abort.pending.uploads` | list and abort all pending uploads under the destination path when the job is committed or aborted. | `true` | +| `fs.s3a.committer.threads` | Number of threads in committers for parallel operations on files. | 8 | +| `fs.s3a.committer.generate.uuid` | Generate a Job UUID if none is passed down from Spark | `false` | +| `fs.s3a.committer.require.uuid` |Require the Job UUID to be passed down from Spark | `false` | -| Option | Magic | Directory | Partitioned | Meaning | Default | -|--------|-------|-----------|-------------|---------|---------| -| `mapreduce.fileoutputcommitter.marksuccessfuljobs` | X | X | X | Write a `_SUCCESS` file at the end of each job | `true` | -| `fs.s3a.committer.threads` | X | X | X | Number of threads in committers for parallel operations on files. | 8 | -| `fs.s3a.committer.staging.conflict-mode` | | X | X | Conflict resolution: `fail`, `append` or `replace`| `append` | -| `fs.s3a.committer.staging.unique-filenames` | | X | X | Generate unique filenames | `true` | -| `fs.s3a.committer.magic.enabled` | X | | | Enable "magic committer" support in the filesystem | `false` | +## Staging committer (Directory and Partitioned) options +| Option | Meaning | Default | +|--------|---------|---------| +| `fs.s3a.committer.staging.conflict-mode` | Conflict resolution: `fail`, `append` or `replace`| `append` | +| `fs.s3a.committer.staging.tmp.path` | Path in the cluster filesystem for temporary data. | `tmp/staging` | +| `fs.s3a.committer.staging.unique-filenames` | Generate unique filenames. | `true` | +| `fs.s3a.committer.staging.abort.pending.uploads` | Deprecated; replaced by `fs.s3a.committer.abort.pending.uploads`. | `(false)` | -| Option | Magic | Directory | Partitioned | Meaning | Default | -|--------|-------|-----------|-------------|---------|---------| -| `fs.s3a.buffer.dir` | X | X | X | Local filesystem directory for data being written and/or staged. | | -| `fs.s3a.committer.staging.tmp.path` | | X | X | Path in the cluster filesystem for temporary data | `tmp/staging` | +### Common Committer Options ```xml @@ -562,11 +579,9 @@ Conflict management is left to the execution engine itself. fs.s3a.committer.magic.enabled - false + true Enable support in the filesystem for the S3 "Magic" committer. - When working with AWS S3, S3Guard must be enabled for the destination - bucket, as consistent metadata listings are required. @@ -579,6 +594,60 @@ Conflict management is left to the execution engine itself. + + fs.s3a.committer.abort.pending.uploads + true + + Should the committers abort all pending uploads to the destination + directory? + + Set to false if more than one job is writing to the same directory tree. 
+ Was: "fs.s3a.committer.staging.abort.pending.uploads" when only used + by the staging committers. + + + + + mapreduce.outputcommitter.factory.scheme.s3a + org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory + + The committer factory to use when writing data to S3A filesystems. + If mapreduce.outputcommitter.factory.class is set, it will + override this property. + + (This property is set in mapred-default.xml) + + + + + fs.s3a.committer.require.uuid + false + + Should the committer fail to initialize if a unique ID isn't set in + "spark.sql.sources.writeJobUUID" or fs.s3a.committer.staging.uuid + This helps guarantee that unique IDs for jobs are being + passed down in spark applications. + + Setting this option outside of spark will stop the S3A committer + in job setup. In MapReduce workloads the job attempt ID is unique + and so no unique ID need be passed down. + + + + + fs.s3a.committer.generate.uuid + false + + Generate a Job UUID if none is passed down from Spark. + This uuid is only generated if the fs.s3a.committer.require.uuid flag + is false. + + +``` + +### Staging Committer Options + +```xml fs.s3a.committer.staging.tmp.path tmp/staging @@ -613,38 +682,66 @@ Conflict management is left to the execution engine itself. + +``` + +### Disabling magic committer path rewriting + +The magic committer recognizes when files are created under paths with `__magic/` as a parent directory +and redirects the upload to a different location, adding the information needed to complete the upload +in the job commit operation. + +If, for some reason, you *do not* want these paths to be redirected and not manifest until later, +the feature can be disabled by setting `fs.s3a.committer.magic.enabled` to false. + +By default it is true. + +```xml - s.s3a.committer.staging.abort.pending.uploads + fs.s3a.committer.magic.enabled true - Should the staging committers abort all pending uploads to the destination - directory? - - Changing this if more than one partitioned committer is - writing to the same destination tree simultaneously; otherwise - the first job to complete will cancel all outstanding uploads from the - others. However, it may lead to leaked outstanding uploads from failed - tasks. If disabled, configure the bucket lifecycle to remove uploads - after a time period, and/or set up a workflow to explicitly delete - entries. Otherwise there is a risk that uncommitted uploads may run up - bills. + Enable support in the S3A filesystem for the "Magic" committer. +``` - - mapreduce.outputcommitter.factory.scheme.s3a - org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory - - The committer factory to use when writing data to S3A filesystems. - If mapreduce.outputcommitter.factory.class is set, it will - override this property. +## Concurrent Jobs writing to the same destination - (This property is set in mapred-default.xml) - +It is sometimes possible for multiple jobs to simultaneously write to the same destination path. + +Before attempting this, the committers must be set to not delete all incomplete uploads on job commit, +by setting `fs.s3a.committer.abort.pending.uploads` to `false` + +```xml + + fs.s3a.committer.abort.pending.uploads + false +``` + +If more than one job is writing to the same destination path then every task MUST +be creating files with paths/filenames unique to the specific job. +It is not enough for them to be unique by task `part-00000.snappy.parquet`, +because each job will have tasks with the same name, so generate files with conflicting operations. 
+ +For the staging committers, setting `fs.s3a.committer.staging.unique-filenames` to ensure unique names are +generated during the upload. Otherwise, use what configuration options are available in the specific `FileOutputFormat`. + +Note: by default, the option `mapreduce.output.basename` sets the base name for files; +changing that from the default `part` value to something unique for each job may achieve this. + +For example, for any job executed through Hadoop MapReduce, the Job ID can be used in the filename. +```xml + + mapreduce.output.basename + part-${mapreduce.job.id} + ``` +Even with these settings, the outcome of concurrent jobs to the same destination is +inherently nondeterministic -use with caution. ## Troubleshooting @@ -658,7 +755,6 @@ in configuration option fs.s3a.committer.magic.enabled The Job is configured to use the magic committer, but the S3A bucket has not been explicitly declared as supporting it. -The destination bucket **must** be declared as supporting the magic committer. This can be done for those buckets which are known to be consistent, either because [S3Guard](s3guard.html) is used to provide consistency, @@ -671,10 +767,6 @@ or because the S3-compatible filesystem is known to be strongly consistent. ``` -*IMPORTANT*: only enable the magic committer against object stores which -offer consistent listings. By default, Amazon S3 does not do this -which is -why the option `fs.s3a.committer.magic.enabled` is disabled by default. - Tip: you can verify that a bucket supports the magic committer through the `hadoop s3guard bucket-info` command: @@ -700,7 +792,7 @@ Delegation token support is disabled Exiting with status 46: 46: The magic committer is not enabled for s3a://landsat-pds ``` -## Error message: "File being created has a magic path, but the filesystem has magic file support disabled: +### Error message: "File being created has a magic path, but the filesystem has magic file support disabled" A file is being written to a path which is used for "magic" files, files which are actually written to a different destination than their stated path @@ -781,7 +873,7 @@ If you have subclassed `FileOutputCommitter` and want to move to the factory model, please get in touch. -## Job/Task fails with PathExistsException: Destination path exists and committer conflict resolution mode is "fail" +### Job/Task fails with PathExistsException: Destination path exists and committer conflict resolution mode is "fail" This surfaces when either of two conditions are met. @@ -795,7 +887,7 @@ during task commit, which will cause the entire job to fail. If you are trying to write data and want write conflicts to be rejected, this is the correct behavior: there was data at the destination so the job was aborted. -## Staging committer task fails with IOException: No space left on device +### Staging committer task fails with IOException: No space left on device There's not enough space on the local hard disk (real or virtual) to store all the uncommitted data of the active tasks on that host. @@ -821,3 +913,169 @@ generating less data each. 1. Use the magic committer. This only needs enough disk storage to buffer blocks of the currently being written file during their upload process, so can use a lot less disk space. + +### Jobs run with directory/partitioned committers complete but the output is empty. + +Make sure that `fs.s3a.committer.staging.tmp.path` is set to a path on the shared cluster +filesystem (usually HDFS). 
It MUST NOT be set to a local directory, as then the job committer, +running on a different host *will not see the lists of pending uploads to commit*. + +### Magic output committer task fails "The specified upload does not exist" "Error Code: NoSuchUpload" + +The magic committer is being used and a task writing data to the S3 store fails +with an error message about the upload not existing. + +``` +java.io.FileNotFoundException: upload part #1 upload + YWHTRqBaxlsutujKYS3eZHfdp6INCNXbk0JVtydX_qzL5fZcoznxRbbBZRfswOjomddy3ghRyguOqywJTfGG1Eq6wOW2gitP4fqWrBYMroasAygkmXNYF7XmUyFHYzja + on test/ITestMagicCommitProtocol-testParallelJobsToSameDestPaths/part-m-00000: + com.amazonaws.services.s3.model.AmazonS3Exception: The specified upload does not + exist. The upload ID may be invalid, or the upload may have been aborted or + completed. (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload; + Request ID: EBE6A0C9F8213AC3; S3 Extended Request ID: + cQFm2N+666V/1HehZYRPTHX9tFK3ppvHSX2a8Oy3qVDyTpOFlJZQqJpSixMVyMI1D0dZkHHOI+E=), + S3 Extended Request ID: + cQFm2N+666V/1HehZYRPTHX9tFK3ppvHSX2a8Oy3qVDyTpOFlJZQqJpSixMVyMI1D0dZkHHOI+E=:NoSuchUpload + + at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:259) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:112) + at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$4(Invoker.java:315) + at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:407) + at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:311) + at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:286) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.retry(WriteOperationHelper.java:154) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.uploadPart(WriteOperationHelper.java:590) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.lambda$uploadBlockAsync$0(S3ABlockOutputStream.java:652) + +Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: + The specified upload does not exist. + The upload ID may be invalid, or the upload may have been aborted or completed. 
+ (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload; Request ID: EBE6A0C9F8213AC3; S3 Extended Request ID: + cQFm2N+666V/1HehZYRPTHX9tFK3ppvHSX2a8Oy3qVDyTpOFlJZQqJpSixMVyMI1D0dZkHHOI+E=), + S3 Extended Request ID: cQFm2N+666V/1HehZYRPTHX9tFK3ppvHSX2a8Oy3qVDyTpOFlJZQqJpSixMVyMI1D0dZkHHOI+E= + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1712) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1367) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1113) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:770) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:744) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:726) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:686) + at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:668) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:532) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:512) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4920) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4866) + at com.amazonaws.services.s3.AmazonS3Client.doUploadPart(AmazonS3Client.java:3715) + at com.amazonaws.services.s3.AmazonS3Client.uploadPart(AmazonS3Client.java:3700) + at org.apache.hadoop.fs.s3a.S3AFileSystem.uploadPart(S3AFileSystem.java:2343) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.lambda$uploadPart$8(WriteOperationHelper.java:594) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:110) + ... 15 more +``` + +The block write failed because the previously created upload was aborted before the data could be written. + +Causes + +1. Another job has written to the same directory tree with an S3A committer + -and when that job was committed, all incomplete uploads were aborted. +1. The `hadoop s3guard uploads --abort` command has being called on/above the directory. +1. Some other program is cancelling uploads to that bucket/path under it. +1. The job is lasting over 24h and a bucket lifecycle policy is aborting the uploads. + +The `_SUCCESS` file from the previous job may provide diagnostics. + +If the cause is Concurrent Jobs, see [Concurrent Jobs writing to the same destination](#concurrent-jobs). + +### Job commit fails "java.io.FileNotFoundException: Completing multipart upload" "The specified upload does not exist" + +The job commit fails with an error about the specified upload not existing. + +``` +java.io.FileNotFoundException: Completing multipart upload on + test/DELAY_LISTING_ME/ITestDirectoryCommitProtocol-testParallelJobsToSameDestPaths/part-m-00001: + com.amazonaws.services.s3.model.AmazonS3Exception: + The specified upload does not exist. + The upload ID may be invalid, or the upload may have been aborted or completed. 
+ (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload; + Request ID: 8E6173241D2970CB; S3 Extended Request ID: + Pg6x75Q60UrbSJgfShCFX7czFTZAHR1Cy7W0Kh+o1uj60CG9jw7hL40tSa+wa7BRLbaz3rhX8Ds=), + S3 Extended Request ID: + Pg6x75Q60UrbSJgfShCFX7czFTZAHR1Cy7W0Kh+o1uj60CG9jw7hL40tSa+wa7BRLbaz3rhX8Ds=:NoSuchUpload + + at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:259) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:112) + at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$4(Invoker.java:315) + at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:407) + at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:311) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.finalizeMultipartUpload(WriteOperationHelper.java:261) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.commitUpload(WriteOperationHelper.java:549) + at org.apache.hadoop.fs.s3a.commit.CommitOperations.innerCommit(CommitOperations.java:199) + at org.apache.hadoop.fs.s3a.commit.CommitOperations.commit(CommitOperations.java:168) + at org.apache.hadoop.fs.s3a.commit.CommitOperations.commitOrFail(CommitOperations.java:144) + at org.apache.hadoop.fs.s3a.commit.CommitOperations.access$100(CommitOperations.java:74) + at org.apache.hadoop.fs.s3a.commit.CommitOperations$CommitContext.commitOrFail(CommitOperations.java:612) + at org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter.lambda$loadAndCommit$5(AbstractS3ACommitter.java:535) + at org.apache.hadoop.fs.s3a.commit.Tasks$Builder.runSingleThreaded(Tasks.java:164) + at org.apache.hadoop.fs.s3a.commit.Tasks$Builder.run(Tasks.java:149) + at org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter.loadAndCommit(AbstractS3ACommitter.java:534) + at org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter.lambda$commitPendingUploads$2(AbstractS3ACommitter.java:482) + at org.apache.hadoop.fs.s3a.commit.Tasks$Builder$1.run(Tasks.java:253) + +Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: The specified upload does not exist. + The upload ID may be invalid, or the upload may have been aborted or completed. 
+ (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload; Request ID: 8E6173241D2970CB; + S3 Extended Request ID: Pg6x75Q60UrbSJgfShCFX7czFTZAHR1Cy7W0Kh+o1uj60CG9jw7hL40tSa+wa7BRLbaz3rhX8Ds=), + + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1712) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1367) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1113) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:770) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:744) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:726) + at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:686) + at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:668) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:532) + at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:512) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4920) + at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4866) + at com.amazonaws.services.s3.AmazonS3Client.completeMultipartUpload(AmazonS3Client.java:3464) + at org.apache.hadoop.fs.s3a.WriteOperationHelper.lambda$finalizeMultipartUpload$1(WriteOperationHelper.java:267) + at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:110) +``` + +The problem is likely to be that of the previous one: concurrent jobs are writing the same output directory, +or another program has cancelled all pending uploads. + +See [Concurrent Jobs writing to the same destination](#concurrent-jobs). + +### Job commit fails `java.io.FileNotFoundException` "File hdfs://.../staging-uploads/_temporary/0 does not exist" + +The Staging committer will fail in job commit if the intermediate directory on the cluster FS is missing during job commit. + +This is possible if another job used the same staging upload directory and, + after committing its work, it deleted the directory. + +A unique Job ID is required for each spark job run by a specific user. +Spark generates job IDs for its committers using the current timestamp, +and if two jobs/stages are started in the same second, they will have the same job ID. + +See [SPARK-33230](https://issues.apache.org/jira/browse/SPARK-33230). + +This is fixed in all spark releases which have the patch applied. + +You can set the property `fs.s3a.committer.staging.require.uuid` to fail +the staging committers fast if a unique Job ID isn't found in +`spark.sql.sources.writeJobUUID`. + +### Job setup fails `Job/task context does not contain a unique ID in spark.sql.sources.writeJobUUID` + +This will surface in job setup if the option `fs.s3a.committer.require.uuid` is `true`, and +one of the following conditions are met + +1. The committer is being used in a Hadoop MapReduce job, whose job attempt ID is unique + -there is no need to add this requirement. + Fix: unset `fs.s3a.committer.require.uuid`. +1. The committer is being used in spark, and the version of spark being used does not + set the `spark.sql.sources.writeJobUUID` property. + Either upgrade to a new spark release, or set `fs.s3a.committer.generate.uuid` to true. 
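
To make the two UUID-related options just discussed concrete, here is a minimal sketch of a configuration for Spark deployments whose Spark release does not set `spark.sql.sources.writeJobUUID`; the property names come from the committer options above, and the values are illustrative rather than recommended defaults.

```xml
<!-- Sketch only: allow job setup to proceed without a Spark-supplied UUID. -->
<property>
  <name>fs.s3a.committer.require.uuid</name>
  <value>false</value>
</property>

<!-- Have the committer generate its own Job UUID instead;
     this only takes effect when the "require" flag above is false. -->
<property>
  <name>fs.s3a.committer.generate.uuid</name>
  <value>true</value>
</property>
```

On Spark releases which do pass the UUID down, `fs.s3a.committer.require.uuid` can instead be set to `true`, so that a missing UUID fails fast during job setup.
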
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/directory_markers.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/directory_markers.md new file mode 100644 index 0000000000000..65fcb6502fb94 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/directory_markers.md @@ -0,0 +1,747 @@ + + +# Experimental: Controlling the S3A Directory Marker Behavior + +This document discusses an experimental feature of the S3A +connector since Hadoop 3.3.1: the ability to retain directory +marker objects above paths containing files or subdirectories. + +## Critical: this is not backwards compatible! + +This document shows how the performance of S3 I/O, especially applications +creating many files (for example Apache Hive) or working with versioned S3 buckets can +increase performance by changing the S3A directory marker retention policy. + +Changing the policy from the default value, `"delete"` _is not backwards compatible_. + +Versions of Hadoop which are incompatible with other marker retention policies, +as of August 2020. + +------------------------------------------------------- +| Branch | Compatible Since | Supported | +|------------|------------------|---------------------| +| Hadoop 2.x | n/a | WONTFIX | +| Hadoop 3.0 | check | Read-only | +| Hadoop 3.1 | check | Read-only | +| Hadoop 3.2 | check | Read-only | +| Hadoop 3.3 | 3.3.1 | Done | +------------------------------------------------------- + +*WONTFIX* + +The Hadoop branch-2 line will *not* be patched. + +*Read-only* + +These branches have read-only compatibility. + +* They may list directories with directory markers, and correctly identify when + such directories have child entries. +* They will open files under directories with such markers. + +However, they have limitations when writing/deleting directories. + +Specifically: S3Guard tables may not be correctly updated in +all conditions, especially on the partial failure of delete +operations. Specifically: they may mistakenly add a tombstone in +the dynamoDB table and so future directory/directory tree listings +will consider the directory to be nonexistent. + +_It is not safe for Hadoop releases before Hadoop 3.3.1 to write +to S3 buckets which have directory markers when S3Guard is enabled_ + +## Verifying read compatibility. + +The `s3guard bucket-info` tool [can be used to verify support](#bucket-info). +This allows for a command line check of compatibility, including +in scripts. + +External Hadoop-based applications should also be assumed to be incompatible +unless otherwise stated/known. + +It is only safe change the directory marker policy if the following + conditions are met: + +1. You know exactly which applications are writing to and reading from + (including backing up) an S3 bucket. +2. You know all applications which read data from the bucket are compatible. + + +### Applications backing up data. + +It is not enough to have a version of Apache Hadoop which is compatible, any +application which backs up an S3 bucket or copies elsewhere must have an S3 +connector which is compatible. For the Hadoop codebase, that means that if +distcp is used, it _must_ be from a compatible hadoop version. + +### How will incompatible applications/versions fail? + +Applications using an incompatible version of the S3A connector will mistake +directories containing data for empty directories. This means that: + +* Listing directories/directory trees may exclude files which exist. +* Queries across the data will miss data files. 
+* Renaming a directory to a new location may exclude files underneath. + +The failures are silent: there is no error message, stack trace or +other warning that files may have been missed. They simply aren't +found. + +### If an application has updated a directory tree incompatibly-- what can be done? + +There's a tool on the hadoop command line, [marker tool](#marker-tool) which can audit +a bucket/path for markers, and clean up any markers which were found. +It can be used to make a bucket compatible with older applications. + +Now that this is all clear, let's explain the problem. + + +## Background: Directory Markers: what and why? + +Amazon S3 is not a filesystem, it is an object store. + +The S3A connector not only provides a hadoop-compatible API to interact with +data in S3, it tries to maintain the filesystem metaphor. + +One key aspect of the metaphor of a file system is "directories" + +#### The directory concept + +In normal Unix-style filesystems, the "filesystem" is really a "directory and +file tree" in which files are always stored in "directories" + + +* A directory may contain zero or more files. +* A directory may contain zero or more directories "subdirectories" +* At the base of a filesystem is the "root directory" +* All files MUST be in a directory "the parent directory" +* All directories other than the root directory must be in another directory. +* If a directory contains no files or directories, it is "empty" +* When a directory is _listed_, all files and directories in it are enumerated + and returned to the caller + + +The S3A connector emulates this metaphor by grouping all objects which have +the same prefix as if they are in the same directory tree. + +If there are two objects `a/b/file1` and `a/b/file2` then S3A pretends that there is a +directory `/a/b` containing two files `file1` and `file2`. + +The directory itself does not exist. + +There's a bit of a complication here. + +#### What does `mkdirs()` do? + +1. In HDFS and other "real" filesystems, when `mkdirs()` is invoked on a path +whose parents are all directories, then an _empty directory_ is created. + +1. This directory can be probed for "it exists" and listed (an empty list is +returned) + +1. Files and other directories can be created in it. + + +Lots of code contains a big assumption here: after you create a directory it +exists. They also assume that after files in a directory are deleted, the +directory still exists. + +Given the S3A connector mimics directories just by aggregating objects which share a +prefix, how can you have empty directories? + +The original Hadoop `s3n://` connector created a Directory Marker -any path ending +in `_$folder$` was considered to be a sign that a directory existed. A call to +`mkdir(s3n://bucket/a/b)` would create a new marker object `a/b_$folder$` . + +The S3A also has directory markers, but it just appends a "/" to the directory +name, so `mkdir(s3a://bucket/a/b)` will create a new marker object `a/b/` . + +When a file is created under a path, the directory marker is deleted. And when a +file is deleted, if it was the last file in the directory, the marker is +recreated. + +And, historically, When a path is listed, if a marker to that path is found, *it +has been interpreted as an empty directory.* + +It is that little detail which is the cause of the incompatibility issues. + +## The Problem with Directory Markers + +Creating, deleting and the listing directory markers adds overhead and can slow +down applications. 
+ +Whenever a file is created we have to delete any marker which could exist in +parent directory _or any parent paths_. Rather than do a sequence of probes for +parent markers existing, the connector issues a single request to S3 to delete +all parents. For example, if a file `/a/b/file1` is created, a multi-object +`DELETE` request containing the keys `/a/` and `/a/b/` is issued. +If no markers exists, this is harmless. + +When a file is deleted, a check for the parent directory continuing to exist +(i.e. are there sibling files/directories?), and if not a marker is created. + +This all works well and has worked well for many years. + +However, it turns out to have some scale problems, especially from the delete +call made whenever a file is created. + +1. The number of the objects listed in each request is that of the number of +parent directories: deeper trees create longer requests. + +2. Every single object listed in the delete request is considered to be a write +operation. + +3. In versioned S3 buckets, tombstone markers are added to the S3 indices even +if no object was deleted. + +4. There's also the overhead of actually issuing the request and awaiting the +response. + +Issue #2 has turned out to cause significant problems on some interactions with +large hive tables: + +Because each object listed in a DELETE call is treated as one operation, and +there is (as of summer 2020) a limit of 3500 write requests/second in a directory +tree. +When writing many files to a deep directory tree, it is the delete calls which +create throttling problems. + +The tombstone markers have follow-on consequences -it makes listings against +S3 versioned buckets slower. +This can have adverse effects on those large directories, again. + +## Strategies to avoid marker-related problems. + +### Presto: every path is a directory + +In the Presto [S3 connector](https://prestodb.io/docs/current/connector/hive.html#amazon-s3-configuration), +`mkdirs()` is a no-op. +Whenever it lists any path which isn't an object or a prefix of one more more objects, it returns an +empty listing. That is:; by default, every path is an empty directory. + +Provided no code probes for a directory existing and fails if it is there, this +is very efficient. That's a big requirement however, -one Presto can pull off +because they know how their file uses data in S3. + + +### Hadoop 3.3.1+: marker deletion is now optional + +From Hadoop 3.3.1 onwards, the S3A client can be configured to skip deleting +directory markers when creating files under paths. This removes all scalability +problems caused by deleting these markers -however, it is achieved at the expense +of backwards compatibility. + +## Controlling marker retention with `fs.s3a.directory.marker.retention` + +There is now an option `fs.s3a.directory.marker.retention` which controls how +markers are managed when new files are created + +*Default* `delete`: a request is issued to delete any parental directory markers +whenever a file or directory is created. + +*New* `keep`: No delete request is issued. +Any directory markers which exist are not deleted. +This is *not* backwards compatible + +*New* `authoritative`: directory markers are deleted _except for files created +in "authoritative" directories_. +This is backwards compatible _outside authoritative directories_. 
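
As a sketch of how this option is set, the following configuration switches every bucket to marker retention; only apply it when all clients of the data are known to be marker aware.

```xml
<!-- Sketch: retain directory markers when files are created.
     Not backwards compatible with older, marker-unaware clients. -->
<property>
  <name>fs.s3a.directory.marker.retention</name>
  <value>keep</value>
</property>
```

A per-bucket form of the same option (for example `fs.s3a.bucket.london.directory.marker.retention`) appears later in this document.
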
+ +Until now, the notion of an "authoritative" +directory has only been used as a performance optimization for deployments +where it is known that all Applications are using the same S3Guard metastore +when writing and reading data. +In such a deployment, if it is also known that all applications are using a +compatible version of the s3a connector, then they +can switch to the higher-performance mode for those specific directories. + +Only the default setting, `fs.s3a.directory.marker.retention = delete` is compatible with +every shipping Hadoop releases. + +## Directory Markers and S3Guard + +Applications which interact with S3A in S3A clients with S3Guard enabled still +create and delete markers. There's no attempt to skip operations, such as by having +`mkdirs() `create entries in the DynamoDB table but not the store. +Having the client always update S3 ensures that other applications and clients +do (eventually) see the changes made by the "guarded" application. + +When S3Guard is configured to treat some directories as [Authoritative](s3guard.html#authoritative) +then an S3A connector with a retention policy of `fs.s3a.directory.marker.retention` of +`authoritative` will omit deleting markers in authoritative directories. + +*Note* there may be further changes in directory semantics in "authoritative mode"; +only use in managed applications where all clients are using the same version of +hadoop, and configured consistently. + +After the directory marker feature [HADOOP-13230](https://issues.apache.org/jira/browse/HADOOP-13230) +was added, issues related to S3Guard integration surfaced: + +1. The incremental update of the S3Guard table was inserting tombstones + over directories as the markers were deleted, hiding files underneath. + This happened during directory `rename()` and `delete()`. +1. The update of the S3Guard table after a partial failure of a bulk delete + operation would insert tombstones in S3Guard records of successfully + deleted markers, irrespective of the directory status. + +Issue #1 is unique to Hadoop branch 3.3; however issue #2 is s critical +part of the S3Guard consistency handling. + +Both issues have been fixed in Hadoop 3.3.x, +in [HADOOP-17244](https://issues.apache.org/jira/browse/HADOOP-17244) + +Issue #2, delete failure handling, is not easily backported and is +not likely to be backported. + +Accordingly: Hadoop releases with read-only compatibility must not be used +to rename or delete directories where markers are retained *when S3Guard is enabled.* + +## Verifying marker policy with `s3guard bucket-info` + +The `bucket-info` command has been enhanced to support verification from the command +line of bucket policies via the `-marker` option + + +| option | verifies | +|--------|--------| +| `-markers aware` | the hadoop release is "aware" of directory markers | +| `-markers delete` | directory markers are deleted | +| `-markers keep` | directory markers are kept (not backwards compatible) | +| `-markers authoritative` | directory markers are kept in authoritative paths | + +All releases of Hadoop which have been updated to be marker aware will support the `-markers aware` option. + + +1. Updated releases which do not support switching marker retention policy will also support the +`-markers delete` option. + + +Example: `s3guard bucket-info -markers aware` on a compatible release. 
+ +``` +> hadoop s3guard bucket-info -markers aware s3a://landsat-pds/ + Filesystem s3a://landsat-pds + Location: us-west-2 + Filesystem s3a://landsat-pds is not using S3Guard + +... + + Security + Delegation token support is disabled + + The directory marker policy is "delete" + + The S3A connector is compatible with buckets where directory markers are not deleted + Available Policies: delete, keep, authoritative +``` + +The same command will fail on older releases, because the `-markers` option +is unknown + +``` +> hadoop s3guard bucket-info -markers aware s3a://landsat-pds/ +Illegal option -markers +Usage: hadoop bucket-info [OPTIONS] s3a://BUCKET + provide/check S3Guard information about a specific bucket + +Common options: + -guarded - Require S3Guard + -unguarded - Force S3Guard to be disabled + -auth - Require the S3Guard mode to be "authoritative" + -nonauth - Require the S3Guard mode to be "non-authoritative" + -magic - Require the S3 filesystem to be support the "magic" committer + -encryption -require {none, sse-s3, sse-kms} - Require encryption policy + +When possible and not overridden by more specific options, metadata +repository information will be inferred from the S3A URL (if provided) + +Generic options supported are: + -conf - specify an application configuration file + -D - define a value for a given property + +2020-08-12 16:47:16,579 [main] INFO util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status 42: Illegal option -markers +```` + +A specific policy check verifies that the connector is configured as desired + +``` +> hadoop s3guard bucket-info -markers delete s3a://landsat-pds/ +Filesystem s3a://landsat-pds +Location: us-west-2 +Filesystem s3a://landsat-pds is not using S3Guard + +... + +The directory marker policy is "delete" +``` + +When probing for a specific policy, the error code "46" is returned if the active policy +does not match that requested: + +``` +> hadoop s3guard bucket-info -markers keep s3a://landsat-pds/ +Filesystem s3a://landsat-pds +Location: us-west-2 +Filesystem s3a://landsat-pds is not using S3Guard + +... + +Security + Delegation token support is disabled + +The directory marker policy is "delete" + +2020-08-12 17:14:30,563 [main] INFO util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status 46: 46: Bucket s3a://landsat-pds: required marker policy is "keep" but actual policy is "delete" +``` + + +## The marker tool:`hadoop s3guard markers` + +The marker tool aims to help migration by scanning/auditing directory trees +for surplus markers, and for optionally deleting them. +Leaf-node markers for empty directories are not considered surplus and +will be retained. 
+ +Syntax + +``` +> hadoop s3guard markers -verbose -nonauth +markers (-audit | -clean) [-min ] [-max ] [-out ] [-limit ] [-nonauth] [-verbose] + View and manipulate S3 directory markers + +``` + +*Options* + +| Option | Meaning | +|-------------------------|-------------------------| +| `-audit` | Audit the path for surplus markers | +| `-clean` | Clean all surplus markers under a path | +| `-min ` | Minimum number of markers an audit must find (default: 0) | +| `-max ]` | Minimum number of markers an audit must find (default: 0) | +| `-limit ]` | Limit the number of objects to scan | +| `-nonauth` | Only consider markers in non-authoritative paths as errors | +| `-out ` | Save a list of all markers found to the nominated file | +| `-verbose` | Verbose output | + +*Exit Codes* + +| Code | Meaning | +|-------|---------| +| 0 | Success | +| 3 | interrupted -the value of `-limit` was reached | +| 42 | Usage | +| 46 | Markers were found (see HTTP "406", "unacceptable") | + +All other non-zero status code also indicate errors of some form or other. + +### `markers -audit` + +Audit the path and fail if any markers were found. + + +``` +> hadoop s3guard markers -limit 8000 -audit s3a://landsat-pds/ + +The directory marker policy of s3a://landsat-pds is "Delete" +2020-08-05 13:42:56,079 [main] INFO tools.MarkerTool (DurationInfo.java:(77)) - Starting: marker scan s3a://landsat-pds/ +Scanned 1,000 objects +Scanned 2,000 objects +Scanned 3,000 objects +Scanned 4,000 objects +Scanned 5,000 objects +Scanned 6,000 objects +Scanned 7,000 objects +Scanned 8,000 objects +Limit of scan reached - 8,000 objects +2020-08-05 13:43:01,184 [main] INFO tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://landsat-pds/: duration 0:05.107s +No surplus directory markers were found under s3a://landsat-pds/ +Listing limit reached before completing the scan +2020-08-05 13:43:01,187 [main] INFO util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status 3: +``` + +Here the scan reached its object limit before completing the audit; the exit code of 3, "interrupted" indicates this. + +Example: a verbose audit of a bucket whose policy if authoritative -it is not an error if markers +are found under the path `/tables`. + +``` +> bin/hadoop s3guard markers -audit s3a://london/ + + 2020-08-05 18:29:16,473 [main] INFO impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143)) - Directory markers will be kept on authoritative paths + The directory marker policy of s3a://london is "Authoritative" + Authoritative path list is "/tables" + 2020-08-05 18:29:19,186 [main] INFO tools.MarkerTool (DurationInfo.java:(77)) - Starting: marker scan s3a://london/ + 2020-08-05 18:29:21,610 [main] INFO tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://london/: duration 0:02.425s + Listed 8 objects under s3a://london/ + +Found 3 surplus directory markers under s3a://london/ + s3a://london/tables + s3a://london/tables/tables-4 + s3a://london/tables/tables-4/tables-5 +Found 5 empty directory 'leaf' markers under s3a://london/ + s3a://london/tables/tables-2 + s3a://london/tables/tables-3 + s3a://london/tables/tables-4/tables-5/06 + s3a://london/tables2 + s3a://london/tables3 + These are required to indicate empty directories + Surplus markers were found -failing audit + 2020-08-05 18:29:21,614 [main] INFO util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status 46: +``` + +This fails because surplus markers were found. 
This S3A bucket would *NOT* be safe for older Hadoop versions +to use. + +The `-nonauth` option does not treat markers under authoritative paths as errors: + +``` +bin/hadoop s3guard markers -nonauth -audit s3a://london/ + +2020-08-05 18:31:16,255 [main] INFO impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143)) - Directory markers will be kept on authoritative paths +The directory marker policy of s3a://london is "Authoritative" +Authoritative path list is "/tables" +2020-08-05 18:31:19,210 [main] INFO tools.MarkerTool (DurationInfo.java:(77)) - Starting: marker scan s3a://london/ +2020-08-05 18:31:22,240 [main] INFO tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://london/: duration 0:03.031s +Listed 8 objects under s3a://london/ + +Found 3 surplus directory markers under s3a://london/ + s3a://london/tables/ + s3a://london/tables/tables-4/ + s3a://london/tables/tables-4/tables-5/ +Found 5 empty directory 'leaf' markers under s3a://london/ + s3a://london/tables/tables-2/ + s3a://london/tables/tables-3/ + s3a://london/tables/tables-4/tables-5/06/ + s3a://london/tables2/ + s3a://london/tables3/ +These are required to indicate empty directories + +Ignoring 3 markers in authoritative paths +``` + +All of this S3A bucket _other_ than the authoritative path `/tables` will be safe for +incompatible Hadoop releases to to use. + + +### `markers clean` + +The `markers clean` command will clean the directory tree of all surplus markers. +The `-verbose` option prints more detail on the operation as well as some IO statistics + +``` +> hadoop s3guard markers -clean -verbose s3a://london/ + +2020-08-05 18:33:25,303 [main] INFO impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143)) - Directory markers will be kept on authoritative paths +The directory marker policy of s3a://london is "Authoritative" +Authoritative path list is "/tables" +2020-08-05 18:33:28,511 [main] INFO tools.MarkerTool (DurationInfo.java:(77)) - Starting: marker scan s3a://london/ + Directory Marker tables + Directory Marker tables/tables-2/ + Directory Marker tables/tables-3/ + Directory Marker tables/tables-4/ + Directory Marker tables/tables-4/tables-5/ + Directory Marker tables/tables-4/tables-5/06/ + Directory Marker tables2/ + Directory Marker tables3/ +2020-08-05 18:33:31,685 [main] INFO tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://london/: duration 0:03.175s +Listed 8 objects under s3a://london/ + +Found 3 surplus directory markers under s3a://london/ + s3a://london/tables/ + s3a://london/tables/tables-4/ + s3a://london/tables/tables-4/tables-5/ +Found 5 empty directory 'leaf' markers under s3a://london/ + s3a://london/tables/tables-2/ + s3a://london/tables/tables-3/ + s3a://london/tables/tables-4/tables-5/06/ + s3a://london/tables2/ + s3a://london/tables3/ +These are required to indicate empty directories + +3 markers to delete in 1 page of 250 keys/page +2020-08-05 18:33:31,688 [main] INFO tools.MarkerTool (DurationInfo.java:(77)) - Starting: Deleting markers +2020-08-05 18:33:31,812 [main] INFO tools.MarkerTool (DurationInfo.java:close(98)) - Deleting markers: duration 0:00.124s + +Storage Statistics for s3a://london + +op_get_file_status 1 +object_delete_requests 1 +object_list_requests 2 +``` + +The `markers -clean` command _does not_ delete markers above empty directories -only those which have +files underneath. 
If invoked on a path, it will clean up the directory tree into a state +where it is safe for older versions of Hadoop to interact with. + +Note that if invoked with a `-limit` value, surplus markers found during the scan will be removed, +even though the scan will be considered a failure due to the limit being reached. + +## Advanced Topics + + +### Probing for retention via `PathCapabilities` and `StreamCapabilities` + +An instance of the filesystem can be probed for its directory marker retention ability/ +policy can be probed for through the `org.apache.hadoop.fs.PathCapabilities` interface, +which all FileSystem classes have supported since Hadoop 3.3. + + +| Probe | Meaning | +|-------------------------|-------------------------| +| `fs.s3a.capability.directory.marker.aware` | Does the filesystem support surplus directory markers? | +| `fs.s3a.capability.directory.marker.policy.delete` | Is the bucket policy "delete"? | +| `fs.s3a.capability.directory.marker.policy.keep` | Is the bucket policy "keep"? | +| `fs.s3a.capability.directory.marker.policy.authoritative` | Is the bucket policy "authoritative"? | +| `fs.s3a.capability.directory.marker.action.delete` | If a file was created at this path, would directory markers be deleted? | +| `fs.s3a.capability.directory.marker.action.keep` | If a file was created at this path, would directory markers be retained? | + + +The probe `fs.s3a.capability.directory.marker.aware` allows for a filesystem to be +probed to determine if its file listing policy is "aware" of directory marker retention +-that is: can this s3a client safely work with S3 buckets where markers have not been deleted. + +The `fs.s3a.capability.directory.marker.policy.` probes return the active policy for the bucket. + +The two `fs.s3a.capability.directory.marker.action.` probes dynamically query the marker +retention behavior of a specific path. +That is: if a file was created at that location, would ancestor directory markers +be kept or deleted? + +The `S3AFileSystem` class also implements the `org.apache.hadoop.fs.StreamCapabilities` interface, which +can be used to probe for marker awareness via the `fs.s3a.capability.directory.marker.aware` capability. + +Again, this will be true if-and-only-if the S3A connector is safe to work with S3A buckets/paths where +directories are retained. + +*If an S3A instance, probed by `PathCapabilities` or `StreamCapabilities` for the capability +`fs.s3a.capability.directory.marker.aware` and it returns false, *it is not safe to be used with +S3A paths where markers have been retained*. + +This is programmatic probe -however it can be accessed on the command line via the +external [`cloudstore`](https://github.com/steveloughran/cloudstore) tool: + +``` +> hadoop jar cloudstore-1.0.jar pathcapability fs.s3a.capability.directory.marker.aware s3a://london/ + +Probing s3a://london/ for capability fs.s3a.capability.directory.marker.aware + +Using filesystem s3a://london +Path s3a://london/ has capability fs.s3a.capability.directory.marker.aware +``` + +If the exit code of the command is `0`, then the S3A is safe to work with buckets +where markers have not been deleted. + +The same tool can be used to dynamically probe for the policy. + +Take a bucket with a retention policy of "authoritative" -only paths under `/tables` will have markers retained. 
+ +```xml + + fs.s3a.bucket.london.directory.marker.retention + authoritative + + + fs.s3a.bucket.london.authoritative.path + /tables + ``` +``` + +With this policy the path capability `fs.s3a.capability.directory.marker.action.keep` will hold under +the path `s3a://london/tables` + +``` +bin/hadoop jar cloudstore-1.0.jar pathcapability fs.s3a.capability.directory.marker.action.keep s3a://london/tables +Probing s3a://london/tables for capability fs.s3a.capability.directory.marker.action.keep +2020-08-11 22:03:31,658 [main] INFO impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143)) + - Directory markers will be kept on authoritative paths +Using filesystem s3a://london +Path s3a://london/tables has capability fs.s3a.capability.directory.marker.action.keep +``` + +However it will not hold for other paths, so indicating that older Hadoop versions will be safe +to work with data written there by this S3A client. + +``` +bin/hadoop jar cloudstore-1.0.jar pathcapability fs.s3a.capability.directory.marker.action.keep s3a://london/tempdir +Probing s3a://london/tempdir for capability fs.s3a.capability.directory.marker.action.keep +2020-08-11 22:06:56,300 [main] INFO impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143)) + - Directory markers will be kept on authoritative paths +Using filesystem s3a://london +Path s3a://london/tempdir lacks capability fs.s3a.capability.directory.marker.action.keep +2020-08-11 22:06:56,308 [main] INFO util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status -1: +``` + + +## Glossary + +#### Directory Marker + +An object in an S3 bucket with a trailing "/", used to indicate that there is a directory at that location. +These are necessary to maintain expectations about directories in an object store: + +1. After `mkdirs(path)`, `exists(path)` holds. +1. After `rm(path/*)`, `exists(path)` holds. + +In previous releases of Hadoop, the marker created by a `mkdirs()` operation was deleted after a file was created. +Rather than make a slow HEAD probe + optional marker DELETE of every parent path element, HADOOP-13164 switched +to enumerating all parent paths and issuing a single bulk DELETE request. +This is faster under light load, but +as each row in the delete consumes one write operation on the allocated IOPs of that bucket partition, creates +load issues when many worker threads/processes are writing to files. +This problem is bad on Apache Hive as: +* The hive partition structure places all files within the same S3 partition. +* As they are deep structures, there are many parent entries to include in the bulk delete calls. +* It's creating a lot temporary files, and still uses rename to commit output. + +Apache Spark has less of an issue when an S3A committer is used -although the partition structure +is the same, the delayed manifestation of output files reduces load. + +#### Leaf Marker + +A directory marker which has not files or directory marker objects underneath. +It genuinely represents an empty directory. + +#### Surplus Marker + +A directory marker which is above one or more files, and so is superfluous. +These are the markers which were traditionally deleted; now it is optional. + +Older versions of Hadoop mistake such surplus markers as Leaf Markers. + +#### Versioned Bucket + +An S3 Bucket which has Object Versioning enabled. + +This provides a backup and recovery mechanism for data within the same +bucket: older objects can be listed and restored through the AWS S3 console +and some applications. 
+ +## References + + + +* [HADOOP-13164](https://issues.apache.org/jira/browse/HADOOP-13164). _Optimize S3AFileSystem::deleteUnnecessaryFakeDirectories._ + +* [HADOOP-13230](https://issues.apache.org/jira/browse/HADOOP-13230). _S3A to optionally retain directory markers_ + +* [HADOOP-16090](https://issues.apache.org/jira/browse/HADOOP-16090). _S3A Client to add explicit support for versioned stores._ + +* [HADOOP-16823](https://issues.apache.org/jira/browse/HADOOP-16823). _Large DeleteObject requests are their own Thundering Herd_ + +* [Object Versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). _Using versioning_ + +* [Optimizing Performance](https://docs.aws.amazon.com/AmazonS3/latest/dev/optimizing-performance.html). _Best Practices Design Patterns: Optimizing Amazon S3 Performance_ diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index 22b98ed599c81..281323542b8ea 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -16,18 +16,29 @@ -**NOTE: Hadoop's `s3:` and `s3n:` connectors have been removed. -Please use `s3a:` as the connector to data hosted in S3 with Apache Hadoop.** -**Consult the [s3n documentation](./s3n.html) for migration instructions.** +## Compatibility -See also: + +### Directory Marker Compatibility + +1. This release can safely list/index/read S3 buckets where "empty directory" +markers are retained. + +1. This release can be configured to retain these directory makers at the +expense of being backwards incompatible. + +Consult [Controlling the S3A Directory Marker Behavior](directory_markers.html) for +full details. + +## Documents * [Encryption](./encryption.html) * [Performance](./performance.html) * [S3Guard](./s3guard.html) * [Troubleshooting](./troubleshooting_s3a.html) +* [Controlling the S3A Directory Marker Behavior](directory_markers.html). * [Committing work to S3 with the "S3A Committers"](./committers.html) * [S3A Committers Architecture](./committer_architecture.html) * [Working with IAM Assumed Roles](./assumed_roles.html) @@ -70,11 +81,12 @@ schemes. * Supports authentication via: environment variables, Hadoop configuration properties, the Hadoop key management store and IAM roles. * Supports per-bucket configuration. -* With [S3Guard](./s3guard.html), adds high performance and consistent metadata/ -directory read operations. This delivers consistency as well as speed. * Supports S3 "Server Side Encryption" for both reading and writing: SSE-S3, SSE-KMS and SSE-C * Instrumented with Hadoop metrics. +* Before S3 was consistent, provided a consistent view of inconsistent storage + through [S3Guard](./s3guard.html). + * Actively maintained by the open source community. @@ -159,7 +171,7 @@ the number of files, during which time partial updates may be visible. If the operations are interrupted, the filesystem is left in an intermediate state. -### Warning #2: Directories are mimiced +### Warning #2: Directories are mimicked The S3A clients mimics directories by: @@ -184,7 +196,7 @@ Parts of Hadoop relying on this can have unexpected behaviour. E.g. the performance recursive listings whenever possible. * It is possible to create files under files if the caller tries hard. * The time to rename a directory is proportional to the number of files -underneath it (directory or indirectly) and the size of the files. 
(The copyis +underneath it (directory or indirectly) and the size of the files. (The copy is executed inside the S3 storage, so the time is independent of the bandwidth from client to S3). * Directory renames are not atomic: they can fail partway through, and callers @@ -320,7 +332,7 @@ export AWS_SECRET_ACCESS_KEY=my.secret.key If the environment variable `AWS_SESSION_TOKEN` is set, session authentication using "Temporary Security Credentials" is enabled; the Key ID and secret key -must be set to the credentials for that specific sesssion. +must be set to the credentials for that specific session. ```bash export AWS_SESSION_TOKEN=SECRET-SESSION-TOKEN @@ -345,7 +357,7 @@ the Hadoop configuration files. By default, the S3A client follows the following authentication chain: -1. The options `fs.s3a.access.key`, `fs.s3a.secret.key` and `fs.s3a.sesson.key +1. The options `fs.s3a.access.key`, `fs.s3a.secret.key` and `fs.s3a.sesson.key` are looked for in the Hadoop XML configuration/Hadoop credential providers, returning a set of session credentials if all three are defined. 1. The `fs.s3a.access.key` and `fs.s3a.secret.key` are looked for in the Hadoop @@ -534,7 +546,7 @@ This means that the default S3A authentication chain can be defined as to directly authenticate with S3 and DynamoDB services. When S3A Delegation tokens are enabled, depending upon the delegation token binding it may be used - to communicate wih the STS endpoint to request session/role + to communicate with the STS endpoint to request session/role credentials. These are loaded and queried in sequence for a valid set of credentials. @@ -630,13 +642,13 @@ The S3A configuration options with sensitive data and `fs.s3a.server-side-encryption.key`) can have their data saved to a binary file stored, with the values being read in when the S3A filesystem URL is used for data access. The reference to this -credential provider then declareed in the hadoop configuration. +credential provider then declared in the Hadoop configuration. For additional reading on the Hadoop Credential Provider API see: [Credential Provider API](../../../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html). -The following configuration options can be storeed in Hadoop Credential Provider +The following configuration options can be stored in Hadoop Credential Provider stores. ``` @@ -725,7 +737,7 @@ of credentials. ### Using secrets from credential providers -Once the provider is set in the Hadoop configuration, hadoop commands +Once the provider is set in the Hadoop configuration, Hadoop commands work exactly as if the secrets were in an XML file. ```bash @@ -761,7 +773,7 @@ used to change the endpoint, encryption and authentication mechanisms of buckets S3Guard options, various minor options. Here are the S3A properties for use in production. The S3Guard options are -documented in the [S3Guard documenents](./s3guard.html); some testing-related +documented in the [S3Guard documents](./s3guard.html); some testing-related options are covered in [Testing](./testing.md). 
```xml diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md index 6ca6060810682..ab8b922df2a70 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md @@ -30,11 +30,11 @@ That's because its a very different system, as you can see: | communication | RPC | HTTP GET/PUT/HEAD/LIST/COPY requests | | data locality | local storage | remote S3 servers | | replication | multiple datanodes | asynchronous after upload | -| consistency | consistent data and listings | eventual consistent for listings, deletes and updates | +| consistency | consistent data and listings | consistent since November 2020| | bandwidth | best: local IO, worst: datacenter network | bandwidth between servers and S3 | | latency | low | high, especially for "low cost" directory operations | -| rename | fast, atomic | slow faked rename through COPY & DELETE| -| delete | fast, atomic | fast for a file, slow & non-atomic for directories | +| rename | fast, atomic | slow faked rename through COPY and DELETE| +| delete | fast, atomic | fast for a file, slow and non-atomic for directories | | writing| incremental | in blocks; not visible until the writer is closed | | reading | seek() is fast | seek() is slow and expensive | | IOPs | limited only by hardware | callers are throttled to shards in an s3 bucket | @@ -539,7 +539,7 @@ in Java 9, so if `default_jsse` is specified and applications run on Java includes GCM in the list of cipher suites on Java 8, so it is equivalent to running with the vanilla JSSE. -### OpenSSL Acceleration +### OpenSSL Acceleration **Experimental Feature** @@ -552,8 +552,8 @@ significant performance benefit over the JSSE. S3A uses the [WildFly OpenSSL](https://github.com/wildfly-security/wildfly-openssl) library to bind OpenSSL to the Java JSSE APIs. This library allows S3A to -transparently read data using OpenSSL. The wildfly-openssl library is a -runtime dependency of S3A and contains native libraries for binding the Java +transparently read data using OpenSSL. The `wildfly-openssl` library is an +optional runtime dependency of S3A and contains native libraries for binding the Java JSSE to OpenSSL. WildFly OpenSSL must load OpenSSL itself. This can be done using the system @@ -596,21 +596,41 @@ exception and S3A initialization will fail. Supported values for `fs.s3a.ssl.channel.mode`: -| fs.s3a.ssl.channel.mode Value | Description | +| `fs.s3a.ssl.channel.mode` Value | Description | |-------------------------------|-------------| -| default_jsse | Uses Java JSSE without GCM on Java 8 | -| default_jsse_with_gcm | Uses Java JSSE | -| default | Uses OpenSSL, falls back to default_jsse if OpenSSL cannot be loaded | -| openssl | Uses OpenSSL, fails if OpenSSL cannot be loaded | +| `default_jsse` | Uses Java JSSE without GCM on Java 8 | +| `default_jsse_with_gcm` | Uses Java JSSE | +| `default` | Uses OpenSSL, falls back to `default_jsse` if OpenSSL cannot be loaded | +| `openssl` | Uses OpenSSL, fails if OpenSSL cannot be loaded | The naming convention is setup in order to preserve backwards compatibility -with HADOOP-15669. +with the ABFS support of [HADOOP-15669](https://issues.apache.org/jira/browse/HADOOP-15669). Other options may be added to `fs.s3a.ssl.channel.mode` in the future as further SSL optimizations are made. 
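
To make the table above concrete, here is a minimal sketch which prefers OpenSSL while keeping the documented fallback to the JSSE:

```xml
<!-- Sketch: try OpenSSL first; fall back to default_jsse if it cannot be loaded. -->
<property>
  <name>fs.s3a.ssl.channel.mode</name>
  <value>default</value>
</property>
```

Choose `openssl` instead when a missing or incompatible OpenSSL installation should be treated as a hard failure rather than silently losing the acceleration.
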
+### WildFly classpath requirements + +For OpenSSL acceleration to work, a compatible version of the +wildfly JAR must be on the classpath. This is not explicitly declared +in the dependencies of the published `hadoop-aws` module, as it is +optional. + +If the wildfly JAR is not found, the network acceleration will fall back +to the JVM, always. + +Note: there have been compatibility problems with wildfly JARs and OpenSSL +releases in the past: version 1.0.4.Final is not compatible with openssl 1.1.1. +An extra complication was that older versions of the `azure-data-lake-store-sdk` +JAR used in `hadoop-azure-datalake` contained an unshaded copy of the 1.0.4.Final +classes, causing binding problems even when a later version was explicitly +being placed on the classpath. + + ## Tuning FileSystem Initialization. +### Disabling bucket existence checks + When an S3A Filesystem instance is created and initialized, the client checks if the bucket provided is valid. This can be slow. You can ignore bucket validation by configuring `fs.s3a.bucket.probe` as follows: @@ -624,3 +644,52 @@ You can ignore bucket validation by configuring `fs.s3a.bucket.probe` as follows Note: if the bucket does not exist, this issue will surface when operations are performed on the filesystem; you will see `UnknownStoreException` stack traces. + +### Rate limiting parallel FileSystem creation operations + +Applications normally ask for filesystems from the shared cache, +via `FileSystem.get()` or `Path.getFileSystem()`. +The cache, `FileSystem.CACHE` will, for each user, cache one instance of a filesystem +for a given URI. +All calls to `FileSystem.get` for a cached FS for a URI such +as `s3a://landsat-pds/` will return that single instance. + +FileSystem instances are created on-demand for the cache, +and will be done in each thread which requests an instance. +This is done outside of any synchronisation block. +Once a task has an initialized FileSystem instance, it will, in a synchronized block +add it to the cache. +If it turns out that the cache now already has an instance for that URI, it will +return the cached instance and close the FS instance it has just created. + +If a FileSystem takes time to be initialized, and many threads are trying to +retrieve a FileSystem instance for the same S3 bucket in parallel, +all but one of the threads will be doing useless work, and may unintentionally +be creating lock contention on shared objects. + +There is an option, `fs.creation.parallel.count`, which uses a semaphore +to limit the number of FS instances which may be created in parallel. + +Setting this to a low number will reduce the amount of wasted work, +at the expense of limiting the number of FileSystem clients which +can be created simultaneously for different object stores/distributed +filesystems. + +For example, a value of four would put an upper limit on the number +of wasted instantiations of a connector for the `s3a://landsat-pds/` +bucket. + +```xml +<property> + <name>fs.creation.parallel.count</name> + <value>4</value> +</property> +``` + +It would also mean that if four threads were in the process +of creating such connectors, all threads trying to create +connectors for other buckets would end up blocking too. + +Consider experimenting with this when running applications +where many threads may try to simultaneously interact +with the same slow-to-initialize object stores.
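To make the interaction between the cache and the creation limit concrete, here is a minimal sketch (illustrative code, not from the Hadoop source) of a client setting `fs.creation.parallel.count` and then performing the normal cached lookup; the `s3a://landsat-pds/` bucket and its `scene_list.gz` object are the examples used elsewhere in these docs:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreationLimitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // At most four FileSystem instances may be initialized in parallel,
    // which bounds the wasted work when many threads race to create the same FS.
    conf.setInt("fs.creation.parallel.count", 4);

    // Normal cached lookup: every thread asking for this URI gets the same instance.
    Path path = new Path("s3a://landsat-pds/scene_list.gz");
    FileSystem fs = path.getFileSystem(conf);
    System.out.println("Filesystem for " + fs.getUri());
  }
}
```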
\ No newline at end of file diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3_select.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3_select.md index a684c3aa31e53..a5aaae91454be 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3_select.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3_select.md @@ -615,24 +615,10 @@ characters can be configured in the Hadoop configuration. **Consistency** -* Assume the usual S3 consistency model applies. +Since November 2020, AWS S3 has been fully consistent. +This also applies to S3 Select. +We do not know what happens if an object is overwritten while a query is active. -* When enabled, S3Guard's DynamoDB table will declare whether or not -a newly deleted file is visible: if it is marked as deleted, the -select request will be rejected with a `FileNotFoundException`. - -* When an existing S3-hosted object is changed, the S3 select operation -may return the results of a SELECT call as applied to either the old -or new version. - -* We don't know whether you can get partially consistent reads, or whether -an extended read ever picks up a later value. - -* The AWS S3 load balancers can briefly cache 404/Not-Found entries -from a failed HEAD/GET request against a nonexistent file; this cached -entry can briefly create create inconsistency, despite the -AWS "Create is consistent" model. There is no attempt to detect or recover from -this. **Concurrency** diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md index e6481f941bd4a..27ac10c825fb5 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md @@ -22,24 +22,39 @@ which can use a (consistent) database as the store of metadata about objects in an S3 bucket. +It was written between 2016 and 2020, *when Amazon S3 was eventually consistent.* +It compensated for the following S3 inconsistencies: +* Newly created objects excluded from directory listings. +* Newly deleted objects retained in directory listings. +* Deleted objects still visible in existence probes and opening for reading. +* S3 Load balancer 404 caching when a probe is made for an object before its creation. + +It did not compensate for update inconsistency, though by storing the etag +values of objects in the database, it could detect and report problems. + +Now that S3 is consistent, there is no need for S3Guard at all. + S3Guard -1. May improve performance on directory listing/scanning operations, +1. Permitted a consistent view of the object store. + +1. Could improve performance on directory listing/scanning operations, including those which take place during the partitioning period of query execution, the process where files are listed and the work divided up amongst processes. -1. Permits a consistent view of the object store. Without this, changes in -objects may not be immediately visible, especially in listing operations. -1. Offers a platform for future performance improvements for running Hadoop -workloads on top of object stores -The basic idea is that, for each operation in the Hadoop S3 client (s3a) that +The basic idea was that, for each operation in the Hadoop S3 client (s3a) that reads or modifies metadata, a shadow copy of that metadata is stored in a -separate MetadataStore implementation.
Each MetadataStore implementation -offers HDFS-like consistency for the metadata, and may also provide faster -lookups for things like file status or directory listings. +separate MetadataStore implementation. The store was +1. Updated after mutating operations on the store +1. Updated after list operations against S3 discovered changes +1. Looked up whenever a probe was made for the existence of a file/directory. +1. Queried for all objects under a path when a directory listing was made; the results were + merged with the S3 listing in a non-authoritative path, used exclusively in + authoritative mode. + For links to early design documents and related patches, see [HADOOP-13345](https://issues.apache.org/jira/browse/HADOOP-13345). @@ -55,6 +70,19 @@ It is essential for all clients writing to an S3Guard-enabled S3 Repository to use the feature. Clients reading the data may work directly with the S3A data, in which case the normal S3 consistency guarantees apply. +## Moving off S3Guard + +How to move off S3Guard, given it is no longer needed. + +1. Unset the option `fs.s3a.metadatastore.impl` globally/for all buckets for which it + was selected. +1. If the option `org.apache.hadoop.fs.s3a.s3guard.disabled.warn.level` has been changed from +the default (`SILENT`), change it back. You no longer need to be warned that S3Guard is disabled. +1. Restart all applications. + +Once you are confident that all applications have been restarted, _Delete the DynamoDB table_. +This is to avoid paying for a database you no longer need. +This is best done from the AWS GUI. ## Setting up S3Guard @@ -70,7 +98,7 @@ without S3Guard. The following values are available: * `WARN`: Warn that data may be at risk in workflows. * `FAIL`: S3AFileSystem instantiation will fail. -The default setting is INFORM. The setting is case insensitive. +The default setting is `SILENT`. The setting is case insensitive. The required level can be set in the `core-site.xml`. --- @@ -113,7 +141,19 @@ Currently the only Metadata Store-independent setting, besides the implementation class above, are the *allow authoritative* and *fail-on-error* flags. -#### Allow Authoritative +#### Authoritative S3Guard + +Authoritative S3Guard is a complicated configuration which delivers performance +at the expense of being unsafe for other applications to use the same directory +tree/bucket unless configured consistently. + +It can also be used to support [directory marker retention](directory_markers.html) +in higher-performance but non-backwards-compatible modes. + +Most deployments do not use this setting -it is only used in deployments where +specific parts of a bucket (e.g. Apache Hive managed tables) are known to +have exclusive access by a single application (Hive) and other tools/applications +from exactly the same Hadoop release. The _authoritative_ expression in S3Guard is present in two different layers, for two different reasons: @@ -178,7 +218,7 @@ recommended that you leave the default setting here: false ``` -. + Note that a MetadataStore MAY persist this bit in the directory listings. (Not MUST). @@ -1236,6 +1276,35 @@ Deleting the metadata store table will simply result in a period of eventual consistency for any file modifications that were made right before the table was deleted. +### Enabling a log message whenever S3Guard is *disabled* + +When dealing with support calls related to the S3A connector, "is S3Guard on?" +is the usual opening question.
This can be determined by looking at the application logs for +messages about S3Guard starting -the absence of S3Guard can only be inferred by the absence +of such messages. + +There is another strategy: have the S3A Connector log whenever *S3Guard is not enabled*. + +This can be done with the configuration option `fs.s3a.s3guard.disabled.warn.level` + +```xml +<property> + <name>fs.s3a.s3guard.disabled.warn.level</name> + <value>silent</value> + <description> + Level to print a message when S3Guard is disabled. + Values: + "warn": log at WARN level + "inform": log at INFO level + "silent": log at DEBUG level + "fail": raise an exception + </description> +</property> +``` + +The `fail` option is clearly more than logging; it exists as an extreme debugging +tool. Use with care. + ### Failure Semantics Operations which modify metadata will make changes to S3 first. If, and only diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md index 5629dab21ff24..69f589ba7962f 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md @@ -324,6 +324,49 @@ Once a bucket is converted to being versioned, it cannot be converted back to being unversioned. + +## Testing Different Marker Retention Policy + +Hadoop supports [different policies for directory marker retention](directory_markers.html) +-essentially the classic "delete" and the higher-performance "keep" options; "authoritative" +is just "keep" restricted to a part of the bucket. + +Example: test with `markers=delete` + +``` +mvn verify -Dparallel-tests -DtestsThreadCount=4 -Dmarkers=delete +``` + +Example: test with `markers=keep` + +``` +mvn verify -Dparallel-tests -DtestsThreadCount=4 -Dmarkers=keep +``` + +Example: test with `markers=authoritative` + +``` +mvn verify -Dparallel-tests -DtestsThreadCount=4 -Dmarkers=authoritative +``` + +This final option is of limited use unless paths in the bucket have actually been configured to be +of mixed status; if nothing is set up, the outcome should equal that of "delete". + +### Enabling auditing of markers + +To enable an audit of the output directory of every test suite, +enable the option `fs.s3a.directory.marker.audit` + +``` +-Dfs.s3a.directory.marker.audit=true +``` + +When set, if the marker policy is to delete markers under the test output directory, then +the marker tool audit command will be run. This will fail if a marker was found. + +This adds extra overhead to every operation, but helps verify that the connector is +not keeping markers where it needs to be deleting them -and hence backwards compatibility +is maintained. + ## Scale Tests There are a set of tests designed to measure the scalability and performance @@ -931,16 +974,18 @@ using an absolute XInclude reference to it. **Warning do not enable any type of failure injection in production. The following settings are for testing only.** -One of the challenges with S3A integration tests is the fact that S3 is an -eventually-consistent storage system. In practice, we rarely see delays in -visibility of recently created objects both in listings (`listStatus()`) and -when getting a single file's metadata (`getFileStatus()`). Since this behavior -is rare and non-deterministic, thorough integration testing is challenging. - -To address this, S3A supports a shim layer on top of the `AmazonS3Client` +One of the challenges with S3A integration tests is the fact that S3 was an
To simulate inconsistencies more +frequently than they would normally surface, S3A supports a shim layer on top of the `AmazonS3Client` class which artificially delays certain paths from appearing in listings. This is implemented in the class `InconsistentAmazonS3Client`. +Now that S3 is consistent, injecting failures during integration and +functional testing is less important. +There's no need to enable it to verify that S3Guard can recover +from consistencies, given that in production such consistencies +will never surface. + ## Simulating List Inconsistencies ### Enabling the InconsistentAmazonS3CClient @@ -1019,9 +1064,6 @@ The default is 5000 milliseconds (five seconds). ``` -Future versions of this client will introduce new failure modes, -with simulation of S3 throttling exceptions the next feature under -development. ### Limitations of Inconsistency Injection @@ -1061,8 +1103,12 @@ inconsistent directory listings. ## Testing S3Guard -[S3Guard](./s3guard.html) is an extension to S3A which adds consistent metadata -listings to the S3A client. As it is part of S3A, it also needs to be tested. +[S3Guard](./s3guard.html) is an extension to S3A which added consistent metadata +listings to the S3A client. + +It has not been needed for applications to work safely with AWS S3 since November +2020. However, it is currently still part of the codebase, and so something which +needs to be tested. The basic strategy for testing S3Guard correctness consists of: @@ -1456,36 +1502,69 @@ export HADOOP_OPTIONAL_TOOLS="hadoop-aws" Run some basic s3guard commands as well as file operations. ```bash -export BUCKET=s3a://example-bucket-name +export BUCKETNAME=example-bucket-name +export BUCKET=s3a://$BUCKETNAME bin/hadoop s3guard bucket-info $BUCKET + +bin/hadoop s3guard uploads $BUCKET +# repeat twice, once with "no" and once with "yes" as responses +bin/hadoop s3guard uploads -abort $BUCKET + +# --------------------------------------------------- +# assuming s3guard is enabled +# if on pay-by-request, expect an error message and exit code of -1 bin/hadoop s3guard set-capacity $BUCKET + +# skip for PAY_PER_REQUEST bin/hadoop s3guard set-capacity -read 15 -write 15 $BUCKET -bin/hadoop s3guard uploads $BUCKET + +bin/hadoop s3guard bucket-info -guarded $BUCKET + bin/hadoop s3guard diff $BUCKET/ bin/hadoop s3guard prune -minutes 10 $BUCKET/ -bin/hadoop s3guard import $BUCKET/ +bin/hadoop s3guard import -verbose $BUCKET/ +bin/hadoop s3guard authoritative -verbose $BUCKET + +# --------------------------------------------------- +# root filesystem operatios +# --------------------------------------------------- + bin/hadoop fs -ls $BUCKET/ +# assuming file is not yet created, expect error and status code of 1 bin/hadoop fs -ls $BUCKET/file + +# exit code of 0 even when path doesn't exist bin/hadoop fs -rm -R -f $BUCKET/dir-no-trailing bin/hadoop fs -rm -R -f $BUCKET/dir-trailing/ + +# error because it is a directory bin/hadoop fs -rm $BUCKET/ + bin/hadoop fs -touchz $BUCKET/file -# expect I/O error as root dir is not empty +# expect I/O error as it is the root directory bin/hadoop fs -rm -r $BUCKET/ + +# succeeds bin/hadoop fs -rm -r $BUCKET/\* -# now success -bin/hadoop fs -rm -r $BUCKET/ + +# --------------------------------------------------- +# File operations +# --------------------------------------------------- bin/hadoop fs -mkdir $BUCKET/dir-no-trailing -# fails with S3Guard +# used to fail with S3Guard bin/hadoop fs -mkdir $BUCKET/dir-trailing/ bin/hadoop fs -touchz $BUCKET/file bin/hadoop fs -ls 
$BUCKET/ bin/hadoop fs -mv $BUCKET/file $BUCKET/file2 # expect "No such file or directory" bin/hadoop fs -stat $BUCKET/file + +# expect success bin/hadoop fs -stat $BUCKET/file2 + +# expect "file exists" bin/hadoop fs -mkdir $BUCKET/dir-no-trailing bin/hadoop fs -mv $BUCKET/file2 $BUCKET/dir-no-trailing bin/hadoop fs -stat $BUCKET/dir-no-trailing/file2 @@ -1502,8 +1581,57 @@ bin/hadoop fs -checksum $BUCKET/dir-no-trailing/file2 # expect "etag" + a long string bin/hadoop fs -D fs.s3a.etag.checksum.enabled=true -checksum $BUCKET/dir-no-trailing/file2 bin/hadoop fs -expunge -immediate -fs $BUCKET + +# --------------------------------------------------- +# Delegation Token support +# --------------------------------------------------- + +# failure unless delegation tokens are enabled bin/hdfs fetchdt --webservice $BUCKET secrets.bin +# success bin/hdfs fetchdt -D fs.s3a.delegation.token.binding=org.apache.hadoop.fs.s3a.auth.delegation.SessionTokenBinding --webservice $BUCKET secrets.bin +bin/hdfs fetchdt -print secrets.bin + +# expect warning "No TokenRenewer defined for token kind S3ADelegationToken/Session" +bin/hdfs fetchdt -renew secrets.bin + +# --------------------------------------------------- +# Directory markers +# --------------------------------------------------- + +# require success +bin/hadoop s3guard bucket-info -markers aware $BUCKET +# expect failure unless bucket policy is keep +bin/hadoop s3guard bucket-info -markers keep $BUCKET/path + +# you may need to set this on a per-bucket basis if you have already been +# playing with options +bin/hadoop s3guard -D fs.s3a.directory.marker.retention=keep bucket-info -markers keep $BUCKET/path +bin/hadoop s3guard -D fs.s3a.bucket.$BUCKETNAME.directory.marker.retention=keep bucket-info -markers keep $BUCKET/path + +# expect to see "Directory markers will be kept" messages and status code of "46" +bin/hadoop fs -D fs.s3a.bucket.$BUCKETNAME.directory.marker.retention=keep -mkdir $BUCKET/p1 +bin/hadoop fs -D fs.s3a.bucket.$BUCKETNAME.directory.marker.retention=keep -mkdir $BUCKET/p1/p2 +bin/hadoop fs -D fs.s3a.bucket.$BUCKETNAME.directory.marker.retention=keep -touchz $BUCKET/p1/p2/file + +# expect failure as markers will be found for /p1/ and /p1/p2/ +bin/hadoop s3guard markers -audit -verbose $BUCKET + +# clean will remove markers +bin/hadoop s3guard markers -clean -verbose $BUCKET + +# expect success and exit code of 0 +bin/hadoop s3guard markers -audit -verbose $BUCKET + +# --------------------------------------------------- +# S3 Select on Landsat +# --------------------------------------------------- + +export LANDSATGZ=s3a://landsat-pds/scene_list.gz + +bin/hadoop s3guard select -header use -compression gzip $LANDSATGZ \ + "SELECT s.entityId,s.cloudCover FROM S3OBJECT s WHERE s.cloudCover < '0.0' LIMIT 100" + ``` ### Other tests diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md index 47bc81e0ec4b3..416793b8ed957 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md @@ -92,6 +92,19 @@ classpath, do not add any of the `aws-sdk-` JARs. This happens if the `hadoop-aws` and `hadoop-common` JARs are out of sync. You can't mix them around: they have to have exactly matching version numbers. 
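When diagnosing a suspected version mismatch, it can help to print the version and origin of the `hadoop-common` classes actually loaded at runtime. A small sketch, assuming only that `org.apache.hadoop.util.VersionInfo` is on the classpath (it is in any Hadoop deployment):

```java
import org.apache.hadoop.util.VersionInfo;

public class ShowHadoopVersion {
  public static void main(String[] args) {
    // The hadoop-aws JAR must exactly match this hadoop-common version.
    System.out.println("hadoop-common version: " + VersionInfo.getVersion());
    // Where the class was loaded from helps spot duplicate or stale JARs.
    System.out.println("loaded from: "
        + VersionInfo.class.getProtectionDomain().getCodeSource().getLocation());
  }
}
```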
+### `java.lang.NoClassDefFoundError: org/wildfly/openssl/OpenSSLProvider` + +This happens when OpenSSL performance +acceleration has been configured by setting `fs.s3a.ssl.channel.mode` +to `openssl` but the wildfly JAR is not on the classpath. + +Fixes: +* Add the wildfly JAR to the classpath +* Use a different channel mode, including `default`, which will +revert to the JVM SSL implementation when the wildfly +or native openssl libraries cannot be loaded. + + ## Authentication Failure If Hadoop cannot authenticate with the S3 service endpoint, @@ -235,6 +248,57 @@ As an example, the endpoint for S3 Frankfurt is `s3.eu-central-1.amazonaws.com`: ``` +### `Class does not implement AWSCredentialsProvider` + +A credential provider listed in `fs.s3a.aws.credentials.provider` does not implement +the interface `com.amazonaws.auth.AWSCredentialsProvider`. + +``` + Cause: java.lang.RuntimeException: java.io.IOException: Class class com.amazonaws.auth.EnvironmentVariableCredentialsProvider does not implement AWSCredentialsProvider + at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:686) + at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:621) + at org.apache.spark.sql.hive.client.HiveClientImpl.newState(HiveClientImpl.scala:219) + at org.apache.spark.sql.hive.client.HiveClientImpl.<init>(HiveClientImpl.scala:126) + at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) + at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) + at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) + at java.lang.reflect.Constructor.newInstance(Constructor.java:423) + at org.apache.spark.sql.hive.client.IsolatedClientLoader.createClient(IsolatedClientLoader.scala:306) + at org.apache.spark.sql.hive.HiveUtils$.newClientForMetadata(HiveUtils.scala:433) + ... + Cause: java.io.IOException: Class class com.amazonaws.auth.EnvironmentVariableCredentialsProvider does not implement AWSCredentialsProvider + at org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProvider(S3AUtils.java:722) + at org.apache.hadoop.fs.s3a.S3AUtils.buildAWSProviderList(S3AUtils.java:687) + at org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProviderSet(S3AUtils.java:620) + at org.apache.hadoop.fs.s3a.S3AFileSystem.bindAWSClient(S3AFileSystem.java:673) + at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:414) + at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3462) + at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:171) + at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3522) + at org.apache.hadoop.fs.FileSystem$Cache.getUnique(FileSystem.java:3496) + at org.apache.hadoop.fs.FileSystem.newInstance(FileSystem.java:591) +``` + +There are two main causes: + +1. A class listed there is not an implementation of the interface. + Fix: review the settings and correct as appropriate. +1. A class listed there does implement the interface, but it has been loaded in a different + classloader, so the JVM does not consider it to be an implementation. + Fix: learn the entire JVM classloader model and see if you can then debug it. + Tip: having both the AWS Shaded SDK and individual AWS SDK modules on your classpath + may be a cause of this.
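For reference, here is a minimal sketch of what an implementation of the interface looks like (AWS SDK v1; the class name and the fixed credentials are purely illustrative, not an existing provider). Anything listed in `fs.s3a.aws.credentials.provider` must implement this interface and be visible to the same classloader as the SDK classes:

```java
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;

/**
 * Illustrative credential provider: returns fixed credentials.
 * A real provider would look the secrets up from somewhere secure.
 */
public class ExampleCredentialsProvider implements AWSCredentialsProvider {

  @Override
  public AWSCredentials getCredentials() {
    return new BasicAWSCredentials("ACCESS-KEY", "SECRET-KEY");
  }

  @Override
  public void refresh() {
    // Nothing to refresh for static credentials.
  }
}
```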
+ +If you see this and you are trying to use the S3A connector with Spark, then the cause can +be that the isolated classloader used to load Hive classes is interfering with the S3A +connector's dynamic loading of `com.amazonaws` classes. To fix this, declare that +the classes in the AWS SDK are loaded from the same classloader which instantiated +the S3A FileSystem instance: + +``` +spark.sql.hive.metastore.sharedPrefixes com.amazonaws. +``` + ## "The security token included in the request is invalid" You are trying to use session/temporary credentials and the session token @@ -410,6 +474,45 @@ If the client using [assumed roles](assumed_roles.html), and a policy is set in `fs.s3a.assumed.role.policy`, then that policy declares _all_ the rights which the caller has. +### `AccessDeniedException` in rename, "MultiObjectDeleteException: One or more objects could not be deleted" + + +``` +mv: rename s3a://london/dest to s3a://london/src on +s3a://london/dest: + com.amazonaws.services.s3.model.MultiObjectDeleteException: One or more objects + could not be deleted (Service: null; Status Code: 200; Error Code: null; Request + ID: 5C9018EF245F02C5; S3 Extended Request ID: + 5fQ2RVCPF0rdvADRv2XY3U4yb2J0gHRID/4jm1eqCXp7RxpU0dH9DliChYsCUD1aVCFtbwfWJWY=), + S3 Extended Request ID: + 5fQ2RVCPF0rdvADRv2XY3U4yb2J0gHRID/4jm1eqCXp7RxpU0dH9DliChYsCUD1aVCFtbwfWJWY=:null: + AccessDenied: dest/file10: Access Denied +``` + +The S3A connector's emulation of file and directory rename is implemented by copying each file, +then deleting the originals. This delete process is done in batches, by default in a single +"multiple object delete request". If one or more of the objects listed in the request cannot +be deleted, an error is returned by S3 listing which objects were not deleted. +If the cause was "access denied", it is translated into an `AccessDeniedException`. + +The rename is halted at this point: files may be present in both the source and destination directories. +Those files which could not be deleted from the source directory will also have been copied +into the destination directory. Files which were successfully deleted from the source +directory will _only_ be in the destination. And files for which the rename operation +had yet to commence -they will only be in the source tree. + +The user has to recover from this themselves. Be assured: no data will have been deleted, it +is just that the data may now be scattered across two directories. +Note: this is one reason why any application which tries to atomically commit work +via rename (classic Hadoop output committers, distcp with the `-atomic` option) is +not safe to use with S3. It is not a file system. + +For an 'AccessDenied' failure, the root cause is IAM permissions. +The user/role/bucket must have the permission +`s3:DeleteObject` on the source path. It is safest to grant `s3:Delete*` so +that if a future version of the S3A connector supported extra operations +(explicit deletion of versioned files, get/set/delete object tagging, ...), +the client will have the permission to use them. ### `AccessDeniedException` when using SSE-KMS @@ -915,61 +1018,6 @@ Something has been trying to write data to "/". These are the issues where S3 does not appear to behave the way a filesystem "should". -### Visible S3 Inconsistency - -Amazon S3 is *an eventually consistent object store*. That is: not a filesystem. - -To reduce visible inconsistencies, use the [S3Guard](./s3guard.html) consistency -cache.
- - -By default, Amazon S3 offers read-after-create consistency: a newly created file -is immediately visible. -There is a small quirk: a negative GET may be cached, such -that even if an object is immediately created, the fact that there "wasn't" -an object is still remembered. - -That means the following sequence on its own will be consistent -``` -touch(path) -> getFileStatus(path) -``` - -But this sequence *may* be inconsistent. - -``` -getFileStatus(path) -> touch(path) -> getFileStatus(path) -``` - -A common source of visible inconsistencies is that the S3 metadata -database —the part of S3 which serves list requests— is updated asynchronously. -Newly added or deleted files may not be visible in the index, even though direct -operations on the object (`HEAD` and `GET`) succeed. - -That means the `getFileStatus()` and `open()` operations are more likely -to be consistent with the state of the object store, but without S3Guard enabled, -directory list operations such as `listStatus()`, `listFiles()`, `listLocatedStatus()`, -and `listStatusIterator()` may not see newly created files, and still list -old files. - -### `FileNotFoundException` even though the file was just written. - -This can be a sign of consistency problems. It may also surface if there is some -asynchronous file write operation still in progress in the client: the operation -has returned, but the write has not yet completed. While the S3A client code -does block during the `close()` operation, we suspect that asynchronous writes -may be taking place somewhere in the stack —this could explain why parallel tests -fail more often than serialized tests. - -### File not found in a directory listing, even though `getFileStatus()` finds it - -(Similarly: deleted file found in listing, though `getFileStatus()` reports -that it is not there) - -This is a visible sign of updates to the metadata server lagging -behind the state of the underlying filesystem. - -Fix: Use [S3Guard](s3guard.html). - ### File not visible/saved @@ -1056,6 +1104,11 @@ for more information. A file being renamed and listed in the S3Guard table could not be found in the S3 bucket even after multiple attempts. +Now that S3 is consistent, this is sign that the S3Guard table is out of sync with +the S3 Data. + +Fix: disable S3Guard: it is no longer needed. + ``` org.apache.hadoop.fs.s3a.RemoteFileChangedException: copyFile(/sourcedir/missing, /destdir/) `s3a://example/sourcedir/missing': File not found on S3 after repeated attempts: `s3a://example/sourcedir/missing' @@ -1066,10 +1119,6 @@ at org.apache.hadoop.fs.s3a.impl.RenameOperation.copySourceAndUpdateTracker(Rena at org.apache.hadoop.fs.s3a.impl.RenameOperation.lambda$initiateCopy$0(RenameOperation.java:412) ``` -Either the file has been deleted, or an attempt was made to read a file before it -was created and the S3 load balancer has briefly cached the 404 returned by that -operation. This is something which AWS S3 can do for short periods. - If error occurs and the file is on S3, consider increasing the value of `fs.s3a.s3guard.consistency.retry.limit`. @@ -1077,29 +1126,40 @@ We also recommend using applications/application options which do not rename files when committing work or when copying data to S3, but instead write directly to the final destination. 
-### `RemoteFileChangedException`: "File to rename not found on unguarded S3 store" - -``` -org.apache.hadoop.fs.s3a.RemoteFileChangedException: copyFile(/sourcedir/missing, /destdir/) - `s3a://example/sourcedir/missing': File to rename not found on unguarded S3 store: `s3a://example/sourcedir/missing' -at org.apache.hadoop.fs.s3a.S3AFileSystem.copyFile(S3AFileSystem.java:3231) -at org.apache.hadoop.fs.s3a.S3AFileSystem.access$700(S3AFileSystem.java:177) -at org.apache.hadoop.fs.s3a.S3AFileSystem$RenameOperationCallbacksImpl.copyFile(S3AFileSystem.java:1368) -at org.apache.hadoop.fs.s3a.impl.RenameOperation.copySourceAndUpdateTracker(RenameOperation.java:448) -at org.apache.hadoop.fs.s3a.impl.RenameOperation.lambda$initiateCopy$0(RenameOperation.java:412) -``` - -An attempt was made to rename a file in an S3 store not protected by SGuard, -the directory list operation included the filename in its results but the -actual operation to rename the file failed. - -This can happen because S3 directory listings and the store itself are not -consistent: the list operation tends to lag changes in the store. -It is possible that the file has been deleted. - -The fix here is to use S3Guard. We also recommend using applications/application -options which do not rename files when committing work or when copying data -to S3, but instead write directly to the final destination. +## Rename not behaving as "expected" + +S3 is not a filesystem. The S3A connector mimics file and directory rename by: + +* HEAD then LIST of source path. The source MUST exist, else a `FileNotFoundException` + is raised. +* HEAD then LIST of the destination path. + This SHOULD NOT exist. + If it does and if the source is a directory, the destination MUST be an empty directory. + If the source is a file, the destination MAY be a directory, empty or not. + If the destination exists and relevant conditions are not met, a `FileAlreadyExistsException` + is raised. +* If the destination path does not exist, a HEAD request is made of the parent path + to verify that there is no object there. + Directory markers are not checked for, nor that the path has any children. +* File-by-file copy of source objects to destination. + Parallelized, with page listings of directory objects and issuing of DELETE requests. +* Post-delete recreation of source parent directory marker, if needed. + +This is slow (`O(data)`) and can cause timeouts on code which is required +to send regular progress reports/heartbeats -for example, distCp. +It is _very unsafe_ if the calling code expects atomic renaming as part +of any commit algorithm. +This is why the [S3A Committers](committers.md) or similar are needed to safely +commit output. + +There is also the risk of race conditions arising if many processes/threads +are working with the same directory tree +[HADOOP-16721](https://issues.apache.org/jira/browse/HADOOP-16721). + +To reduce this risk, since Hadoop 3.3.1, the S3A connector no longer verifies the parent directory +of the destination of a rename is a directory -only that it is _not_ a file. +You can rename a directory or file deep under a file if you try -after which +there is no guarantee of the files being found in listings. Try not to do that. ## S3 Server Side Encryption @@ -1203,6 +1263,24 @@ a new one than read to the end of a large file. Note: the threshold when data is read rather than the stream aborted can be tuned by `fs.s3a.readahead.range`; seek policy in `fs.s3a.experimental.input.fadvise`.
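As a hedged illustration of that tuning (the values and the file path are made up for the example, not recommended defaults), a client reading a seek-heavy columnar file might raise the readahead range and select the `random` seek policy before opening the stream:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RandomReadTuning {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Raise the threshold below which data is drained rather than the HTTPS
    // connection aborted when seeking.
    conf.setLong("fs.s3a.readahead.range", 1024 * 1024);
    // Bias the input stream towards bounded GET requests for seek-heavy IO.
    conf.set("fs.s3a.experimental.input.fadvise", "random");

    Path path = new Path("s3a://example-bucket/data/file.orc");
    FileSystem fs = path.getFileSystem(conf);
    try (FSDataInputStream in = fs.open(path)) {
      in.seek(1024);  // the seek policy matters once seeks like this happen
      System.out.println(in.read());
    }
  }
}
```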
+### `PathIOException` Number of parts in multipart upload exceeded. + +Number of parts in multipart upload exceeded + +``` +org.apache.hadoop.fs.PathIOException: `test/testMultiPartUploadFailure': Number of parts in multipart upload exceeded. Current part count = X, Part count limit = Y + + at org.apache.hadoop.fs.s3a.WriteOperationHelper.newUploadPartRequest(WriteOperationHelper.java:432) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.uploadBlockAsync(S3ABlockOutputStream.java:627) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream$MultiPartUpload.access$000(S3ABlockOutputStream.java:532) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.uploadCurrentBlock(S3ABlockOutputStream.java:316) + at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.write(S3ABlockOutputStream.java:301) +``` + +This is a known issue where upload fails if number of parts +is more than 10000 (specified by aws SDK). You can configure +`fs.s3a.multipart.size` to reduce the number of parts. + ### `UnknownStoreException` Bucket does not exist. The bucket does not exist. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java index 740f256b62e2c..ae54dfee0004e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java @@ -79,15 +79,15 @@ protected Path path(final String filepath) throws IOException { @Override public void testDirectWrite() throws Exception { - resetStorageStatistics(); + final long renames = getRenameOperationCount(); super.testDirectWrite(); assertEquals("Expected no renames for a direct write distcp", 0L, - getRenameOperationCount()); + getRenameOperationCount() - renames); } @Override public void testNonDirectWrite() throws Exception { - resetStorageStatistics(); + final long renames = getRenameOperationCount(); try { super.testNonDirectWrite(); } catch (FileNotFoundException e) { @@ -96,11 +96,7 @@ public void testNonDirectWrite() throws Exception { // S3Guard is not enabled } assertEquals("Expected 2 renames for a non-direct write distcp", 2L, - getRenameOperationCount()); - } - - private void resetStorageStatistics() { - getFileSystem().getStorageStatistics().reset(); + getRenameOperationCount() - renames); } private long getRenameOperationCount() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java index 059312a8103a3..8222fff614598 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java @@ -15,25 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.fs.contract.s3a; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package org.apache.hadoop.fs.contract.s3a; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; -import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.WriteOperationHelper; -import static org.apache.hadoop.fs.s3a.S3ATestConstants.*; -import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.DEFAULT_SCALE_TESTS_ENABLED; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.KEY_HUGE_PARTITION_SIZE; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.KEY_SCALE_TESTS_ENABLED; +import static org.apache.hadoop.fs.s3a.S3ATestConstants.SCALE_TEST_TIMEOUT_MILLIS; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBool; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBytes; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeEnableS3Guard; import static org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.DEFAULT_HUGE_PARTITION_SIZE; /** * Test MultipartUploader with S3A. + *

    * Although not an S3A Scale test subclass, it uses the -Dscale option * to enable it, and partition size option to control the size of * parts uploaded. @@ -41,14 +44,11 @@ public class ITestS3AContractMultipartUploader extends AbstractContractMultipartUploaderTest { - private static final Logger LOG = - LoggerFactory.getLogger(ITestS3AContractMultipartUploader.class); - private int partitionSize; /** * S3 requires a minimum part size of 5MB (except the last part). - * @return 5MB + * @return 5MB+ value */ @Override protected int partSizeInBytes() { @@ -126,37 +126,15 @@ public void setup() throws Exception { DEFAULT_HUGE_PARTITION_SIZE); } - /** - * Extend superclass teardown with actions to help clean up the S3 store, - * including aborting uploads under the test path. - */ - @Override - public void teardown() throws Exception { - Path teardown = path("teardown").getParent(); - S3AFileSystem fs = getFileSystem(); - if (fs != null) { - WriteOperationHelper helper = fs.getWriteOperationHelper(); - try { - LOG.info("Teardown: aborting outstanding uploads under {}", teardown); - int count = helper.abortMultipartUploadsUnderPath( - fs.pathToKey(teardown)); - LOG.info("Found {} incomplete uploads", count); - } catch (Exception e) { - LOG.warn("Exeception in teardown", e); - } - } - super.teardown(); - } - /** * S3 has no concept of directories, so this test does not apply. */ public void testDirectoryInTheWay() throws Exception { - // no-op + skip("unsupported"); } @Override public void testMultipartUploadReverseOrder() throws Exception { - ContractTestUtils.skip("skipped for speed"); + skip("skipped for speed"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java index e623d5d27941b..e44df5facd2c3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.s3a.Statistic; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; @@ -106,9 +107,7 @@ public void setup() throws Exception { @Override public void testRenameDirIntoExistingDir() throws Throwable { - describe("Verify renaming a dir into an existing dir puts the files" - +" from the source dir into the existing dir" - +" and leaves existing files alone"); + describe("S3A rename into an existing directory returns false"); FileSystem fs = getFileSystem(); String sourceSubdir = "source"; Path srcDir = path(sourceSubdir); @@ -169,4 +168,9 @@ public void testRenamePopulatesFileAncestors2() throws Exception { validateAncestorsMoved(src, dest, nestedFile); } + + @Override + public void testRenameFileUnderFileSubdir() throws Exception { + skip("Rename deep paths under files is allowed"); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java index fca1004ae9a57..bc8b71b3b3bf4 100644 --- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java @@ -44,7 +44,7 @@ import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; import org.apache.hadoop.util.NativeCodeLoader; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.INPUT_FADVISE; import static org.apache.hadoop.fs.s3a.Constants.INPUT_FADV_NORMAL; import static org.apache.hadoop.fs.s3a.Constants.INPUT_FADV_RANDOM; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java index 13110201b7e37..0d3dd4c2f6606 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/S3AContract.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractBondedFSContract; +import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; /** @@ -53,6 +54,9 @@ public S3AContract(Configuration conf) { */ public S3AContract(Configuration conf, boolean addContractResource) { super(conf); + // Force deprecated key load through the + // static initializers. See: HADOOP-17385 + S3AFileSystem.initializeClass(); //insert the base features if (addContractResource) { addConfResource(CONTRACT_XML); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java index 99bab73e71c33..6afdd76ca4432 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3AMockTest.java @@ -26,7 +26,6 @@ import java.net.URI; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.s3a.commit.CommitConstants; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore; @@ -71,8 +70,6 @@ public Configuration createConfiguration() { // test we don't issue request to AWS DynamoDB service. 
conf.setClass(S3_METADATA_STORE_IMPL, NullMetadataStore.class, MetadataStore.class); - // FS is always magic - conf.setBoolean(CommitConstants.MAGIC_COMMITTER_ENABLED, true); // use minimum multipart size for faster triggering conf.setLong(Constants.MULTIPART_SIZE, MULTIPART_MIN_SIZE); conf.setInt(Constants.S3A_BUCKET_PROBE, 1); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java index d94288dfc307f..b1ea45f686528 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java @@ -25,16 +25,25 @@ import org.apache.hadoop.fs.contract.AbstractFSContractTestBase; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.s3a.S3AContract; +import org.apache.hadoop.fs.s3a.tools.MarkerTool; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; import org.apache.hadoop.io.IOUtils; -import org.junit.Before; + +import org.junit.AfterClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.FileNotFoundException; import java.io.IOException; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestDynamoTablePrefix; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBool; +import static org.apache.hadoop.fs.s3a.S3AUtils.E_FS_CLOSED; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.UNLIMITED_LISTING; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; /** * An extension of the contract test base set up for S3A tests. @@ -44,6 +53,12 @@ public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase protected static final Logger LOG = LoggerFactory.getLogger(AbstractS3ATestBase.class); + /** + * FileSystem statistics are collected across every test case. + */ + protected static final IOStatisticsSnapshot FILESYSTEM_IOSTATS = + snapshotIOStatistics(); + @Override protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf, false); @@ -56,24 +71,77 @@ public void setup() throws Exception { // filesystems which add default configuration resources to do it before // our tests start adding/removing options. See HADOOP-16626. FileSystem.getLocal(new Configuration()); + // Force deprecated key load through the + // static initializers. See: HADOOP-17385 + S3AFileSystem.initializeClass(); super.setup(); } @Override public void teardown() throws Exception { Thread.currentThread().setName("teardown"); + + maybeAuditTestPath(); + super.teardown(); + if (getFileSystem() != null) { + FILESYSTEM_IOSTATS.aggregate(getFileSystem().getIOStatistics()); + } describe("closing file system"); IOUtils.closeStream(getFileSystem()); } - @Before - public void nameThread() { - Thread.currentThread().setName("JUnit-" + getMethodName()); + /** + * Dump the filesystem statistics after the class. 
+ */ + @AfterClass + public static void dumpFileSystemIOStatistics() { + LOG.info("Aggregate FileSystem Statistics {}", + ioStatisticsToPrettyString(FILESYSTEM_IOSTATS)); } - protected String getMethodName() { - return methodName.getMethodName(); + /** + * Audit the FS under {@link #methodPath()} if + * the test option {@link #DIRECTORY_MARKER_AUDIT} is + * true. + */ + public void maybeAuditTestPath() { + final S3AFileSystem fs = getFileSystem(); + if (fs != null) { + try { + boolean audit = getTestPropertyBool(fs.getConf(), + DIRECTORY_MARKER_AUDIT, false); + Path methodPath = methodPath(); + if (audit + && !fs.getDirectoryMarkerPolicy() + .keepDirectoryMarkers(methodPath) + && fs.isDirectory(methodPath)) { + MarkerTool.ScanResult result = MarkerTool.execMarkerTool( + new MarkerTool.ScanArgsBuilder() + .withSourceFS(fs) + .withPath(methodPath) + .withDoPurge(true) + .withMinMarkerCount(0) + .withMaxMarkerCount(0) + .withLimit(UNLIMITED_LISTING) + .withNonAuth(false) + .build()); + final String resultStr = result.toString(); + assertEquals("Audit of " + methodPath + " failed: " + + resultStr, + 0, result.getExitCode()); + assertEquals("Marker Count under " + methodPath + + " non-zero: " + resultStr, + 0, result.getFilteredMarkerCount()); + } + } catch (FileNotFoundException ignored) { + } catch (Exception e) { + // If is this is not due to the FS being closed: log. + if (!e.toString().contains(E_FS_CLOSED)) { + LOG.warn("Marker Tool Failure", e); + } + } + } } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java index 0a91102bf5aa6..b1d742a400505 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java @@ -109,7 +109,8 @@ private S3AFileSystem createFullyAuthFS() URI uri = testFS.getUri(); removeBaseAndBucketOverrides(uri.getHost(), config, - METADATASTORE_AUTHORITATIVE); + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); config.setBoolean(METADATASTORE_AUTHORITATIVE, true); final S3AFileSystem newFS = createFS(uri, config); // set back the same metadata store instance @@ -124,7 +125,8 @@ private S3AFileSystem createSinglePathAuthFS(String authPath) URI uri = testFS.getUri(); removeBaseAndBucketOverrides(uri.getHost(), config, - METADATASTORE_AUTHORITATIVE); + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); config.set(AUTHORITATIVE_PATH, authPath.toString()); final S3AFileSystem newFS = createFS(uri, config); // set back the same metadata store instance @@ -139,7 +141,8 @@ private S3AFileSystem createMultiPathAuthFS(String first, String middle, String URI uri = testFS.getUri(); removeBaseAndBucketOverrides(uri.getHost(), config, - METADATASTORE_AUTHORITATIVE); + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); config.set(AUTHORITATIVE_PATH, first + "," + middle + "," + last); final S3AFileSystem newFS = createFS(uri, config); // set back the same metadata store instance @@ -155,7 +158,8 @@ private S3AFileSystem createRawFS() throws Exception { removeBaseAndBucketOverrides(uri.getHost(), config, S3_METADATA_STORE_IMPL); removeBaseAndBucketOverrides(uri.getHost(), config, - METADATASTORE_AUTHORITATIVE); + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); return createFS(uri, config); } diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java index 3dfe286bc7b65..55423273b9579 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java @@ -18,7 +18,6 @@ package org.apache.hadoop.fs.s3a; -import com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; import org.apache.hadoop.util.StopWatch; @@ -31,11 +30,12 @@ import org.slf4j.LoggerFactory; import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; /** * Basic test for S3A's blocking executor service. @@ -70,7 +70,7 @@ public static void afterClass() throws Exception { @Test public void testSubmitCallable() throws Exception { ensureCreated(); - ListenableFuture f = tpe.submit(callableSleeper); + Future f = tpe.submit(callableSleeper); Integer v = f.get(); assertEquals(SOME_VALUE, v); } @@ -92,11 +92,12 @@ public void testSubmitRunnable() throws Exception { */ protected void verifyQueueSize(ExecutorService executorService, int expectedQueueSize) { - StopWatch stopWatch = new StopWatch().start(); + CountDownLatch latch = new CountDownLatch(1); for (int i = 0; i < expectedQueueSize; i++) { - executorService.submit(sleeper); - assertDidntBlock(stopWatch); + executorService.submit(new LatchedSleeper(latch)); } + StopWatch stopWatch = new StopWatch().start(); + latch.countDown(); executorService.submit(sleeper); assertDidBlock(stopWatch); } @@ -124,15 +125,6 @@ public void testChainedQueue() throws Throwable { // Helper functions, etc. - private void assertDidntBlock(StopWatch sw) { - try { - assertFalse("Non-blocking call took too long.", - sw.now(TimeUnit.MILLISECONDS) > BLOCKING_THRESHOLD_MSEC); - } finally { - sw.reset().start(); - } - } - private void assertDidBlock(StopWatch sw) { try { if (sw.now(TimeUnit.MILLISECONDS) < BLOCKING_THRESHOLD_MSEC) { @@ -164,6 +156,25 @@ public Integer call() throws Exception { } }; + private class LatchedSleeper implements Runnable { + private final CountDownLatch latch; + + LatchedSleeper(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public void run() { + try { + latch.await(); + Thread.sleep(TASK_SLEEP_MSEC); + } catch (InterruptedException e) { + LOG.info("Thread {} interrupted.", Thread.currentThread().getName()); + Thread.currentThread().interrupt(); + } + } + } + /** * Helper function to create thread pool under test. 
*/ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java index bd6bf2f6cdbc3..5b6e634a63168 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java @@ -18,23 +18,287 @@ package org.apache.hadoop.fs.s3a; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collection; + +import org.assertj.core.api.Assertions; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.s3a.auth.ITestRestrictedReadAccess; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.mapred.LocatedFileStatusFetcher; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; +import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH; +import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; +import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBucketOverrides; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; +import static org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS; + /** * Test the LocatedFileStatusFetcher can do. * This is related to HADOOP-16458. * There's basic tests in ITestS3AFSMainOperations; this * is see if we can create better corner cases. + *
    + * Much of the class is based on tests in {@link ITestRestrictedReadAccess}, + * but whereas that tests failure paths, this looks at the performance + * of successful invocations. */ +@RunWith(Parameterized.class) public class ITestLocatedFileStatusFetcher extends AbstractS3ATestBase { private static final Logger LOG = LoggerFactory.getLogger(ITestLocatedFileStatusFetcher.class); + + /** + * Parameterization. + */ + @Parameterized.Parameters(name = "{0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {"raw", false}, + {"nonauth", true} + }); + } + + /** Filter to select everything. */ + private static final PathFilter EVERYTHING = t -> true; + + /** Filter to select .txt files. */ + private static final PathFilter TEXT_FILE = + path -> path.toUri().toString().endsWith(".txt"); + + /** The same path filter used in FileInputFormat. */ + private static final PathFilter HIDDEN_FILE_FILTER = + (p) -> { + String n = p.getName(); + return !n.startsWith("_") && !n.startsWith("."); + }; + + /** + * Text found in LocatedFileStatusFetcher exception when the glob + * returned "null". + */ + private static final String DOES_NOT_EXIST = "does not exist"; + + /** + * Text found in LocatedFileStatusFetcher exception when + * the glob returned an empty list. + */ + private static final String MATCHES_0_FILES = "matches 0 files"; + + /** + * Text used in files. + */ + public static final byte[] HELLO = "hello".getBytes(StandardCharsets.UTF_8); + + /** + * How many list calls are expected in a run which collects them: {@value}. + */ + private static final int EXPECTED_LIST_COUNT = 4; + + private final String name; + + private final boolean s3guard; + + private Path basePath; + + private Path emptyDir; + + private Path emptyFile; + + private Path subDir; + + private Path subdirFile; + + private Path subDir2; + + private Path subdir2File1; + + private Path subdir2File2; + + private Configuration listConfig; + + public ITestLocatedFileStatusFetcher(final String name, + final boolean s3guard) { + this.name = name; + this.s3guard = s3guard; + } + + @Override + public Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + String bucketName = getTestBucketName(conf); + + removeBaseAndBucketOverrides(bucketName, conf, + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); + removeBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL); + if (!s3guard) { + removeBaseAndBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL); + } + conf.setBoolean(METADATASTORE_AUTHORITATIVE, false); + disableFilesystemCaching(conf); + return conf; + } + @Override + public void setup() throws Exception { + super.setup(); + S3AFileSystem fs + = getFileSystem(); + // avoiding the parameterization to steer clear of accidentally creating + // patterns; a timestamp is used to ensure tombstones from previous runs + // do not interfere + basePath = path("ITestLocatedFileStatusFetcher-" + name + + "-" + System.currentTimeMillis() / 1000); + + // define the paths and create them. 
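Editor's note: the new test binds its raw/nonauth dimension through the JUnit 4 Parameterized runner. Stripped of the S3A specifics, the wiring looks roughly like the sketch below; the class name, parameter names and test body are illustrative only.

```java
import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import static org.junit.Assert.assertNotNull;

/** Illustrative only: the raw/nonauth parameterization pattern. */
@RunWith(Parameterized.class)
public class RawOrGuardedSketchTest {

  @Parameterized.Parameters(name = "{0}")
  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{
        {"raw", false},
        {"nonauth", true},
    });
  }

  private final String variant;
  private final boolean s3guard;

  public RawOrGuardedSketchTest(String variant, boolean s3guard) {
    this.variant = variant;
    this.s3guard = s3guard;
  }

  @Test
  public void testVariantIsWired() {
    // The real test strips or retains the S3Guard options here before
    // creating the filesystem under test.
    assertNotNull(variant);
    System.out.println(variant + " -> s3guard=" + s3guard);
  }
}
```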
+ describe("Creating test directories and files"); + + // an empty directory directory under the noReadDir + emptyDir = new Path(basePath, "emptyDir"); + fs.mkdirs(emptyDir); + + // an empty file directory under the noReadDir + emptyFile = new Path(basePath, "emptyFile.txt"); + touch(fs, emptyFile); + + // a subdirectory + subDir = new Path(basePath, "subDir"); + + // and a file in that subdirectory + subdirFile = new Path(subDir, "subdirFile.txt"); + createFile(fs, subdirFile, true, HELLO); + subDir2 = new Path(subDir, "subDir2"); + subdir2File1 = new Path(subDir2, "subdir2File1.txt"); + subdir2File2 = new Path(subDir2, "subdir2File2.txt"); + createFile(fs, subdir2File1, true, HELLO); + createFile(fs, subdir2File2, true, HELLO); + listConfig = new Configuration(getConfiguration()); + } + + + /** + * Assert that the fetcher stats logs the expected number of calls. + * @param fetcher fetcher + * @param expectedListCount expected number of list calls + */ + private void assertListCount(final LocatedFileStatusFetcher fetcher, + final int expectedListCount) { + IOStatistics iostats = extractStatistics(fetcher); + LOG.info("Statistics of fetcher: {}", iostats); + assertThatStatisticCounter(iostats, + OBJECT_LIST_REQUEST) + .describedAs("stats of %s", iostats) + .isEqualTo(expectedListCount); + } + + /** + * Run a located file status fetcher against the directory tree. + */ @Test - public void testGlobScan() throws Throwable { + public void testSingleThreadedLocatedFileStatus() throws Throwable { + + describe("LocatedFileStatusFetcher operations"); + // use the same filter as FileInputFormat; single thread. + listConfig.setInt(LIST_STATUS_NUM_THREADS, 1); + LocatedFileStatusFetcher fetcher = + new LocatedFileStatusFetcher( + listConfig, + new Path[]{basePath}, + true, + HIDDEN_FILE_FILTER, + true); + Iterable stats = fetcher.getFileStatuses(); + Assertions.assertThat(stats) + .describedAs("result of located scan") + .flatExtracting(FileStatus::getPath) + .containsExactlyInAnyOrder( + emptyFile, + subdirFile, + subdir2File1, + subdir2File2); + assertListCount(fetcher, EXPECTED_LIST_COUNT); } + + /** + * Run a located file status fetcher against the directory tree. + */ + @Test + public void testLocatedFileStatusFourThreads() throws Throwable { + + // four threads and the text filter. + int threads = 4; + describe("LocatedFileStatusFetcher with %d", threads); + listConfig.setInt(LIST_STATUS_NUM_THREADS, threads); + LocatedFileStatusFetcher fetcher = + new LocatedFileStatusFetcher( + listConfig, + new Path[]{basePath}, + true, + EVERYTHING, + true); + Iterable stats = fetcher.getFileStatuses(); + IOStatistics iostats = extractStatistics(fetcher); + LOG.info("Statistics of fetcher: {}", iostats); + Assertions.assertThat(stats) + .describedAs("result of located scan") + .isNotNull() + .flatExtracting(FileStatus::getPath) + .containsExactlyInAnyOrder( + emptyFile, + subdirFile, + subdir2File1, + subdir2File2); + assertListCount(fetcher, EXPECTED_LIST_COUNT); + } + + /** + * Run a located file status fetcher against a file. + */ + @Test + public void testLocatedFileStatusScanFile() throws Throwable { + // pass in a file as the base of the scan. 
+ describe("LocatedFileStatusFetcher with file %s", subdirFile); + listConfig.setInt(LIST_STATUS_NUM_THREADS, 16); + LocatedFileStatusFetcher fetcher + = new LocatedFileStatusFetcher( + listConfig, + new Path[]{subdirFile}, + true, + TEXT_FILE, + true); + Iterable stats = fetcher.getFileStatuses(); + Assertions.assertThat(stats) + .describedAs("result of located scan") + .isNotNull() + .flatExtracting(FileStatus::getPath) + .containsExactly(subdirFile); + IOStatistics ioStatistics = fetcher.getIOStatistics(); + Assertions.assertThat(ioStatistics) + .describedAs("IO statistics of %s", fetcher) + .isNull(); + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java index afd3ec2bd3473..53fa0d83b55a7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java @@ -20,8 +20,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.io.IOUtils; import org.junit.BeforeClass; @@ -31,7 +33,10 @@ import java.io.InputStream; import java.net.URI; +import static org.apache.hadoop.fs.StreamCapabilities.ABORTABLE_STREAM; import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertCompleteAbort; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertNoopAbort; /** * Tests small file upload functionality for @@ -94,16 +99,16 @@ public void testBlocksClosed() throws Throwable { Path dest = path("testBlocksClosed"); describe(" testBlocksClosed"); FSDataOutputStream stream = getFileSystem().create(dest, true); - S3AInstrumentation.OutputStreamStatistics statistics + BlockOutputStreamStatistics statistics = S3ATestUtils.getOutputStreamStatistics(stream); byte[] data = ContractTestUtils.dataset(16, 'a', 26); stream.write(data); LOG.info("closing output stream"); stream.close(); assertEquals("total allocated blocks in " + statistics, - 1, statistics.blocksAllocated()); + 1, statistics.getBlocksAllocated()); assertEquals("actively allocated blocks in " + statistics, - 0, statistics.blocksActivelyAllocated()); + 0, statistics.getBlocksActivelyAllocated()); LOG.info("end of test case"); } @@ -129,7 +134,7 @@ private void markAndResetDatablock(S3ADataBlocks.BlockFactory factory) throws Exception { S3AInstrumentation instrumentation = new S3AInstrumentation(new URI("s3a://example")); - S3AInstrumentation.OutputStreamStatistics outstats + BlockOutputStreamStatistics outstats = instrumentation.newOutputStreamStatistics(null); S3ADataBlocks.DataBlock block = factory.create(1, BLOCK_SIZE, outstats); block.write(dataset, 0, dataset.length); @@ -154,4 +159,51 @@ public void testMarkReset() throws Throwable { markAndResetDatablock(createFactory(getFileSystem())); } + @Test + public void testAbortAfterWrite() throws Throwable { + describe("Verify abort after a write does not create a file"); + Path dest = path(getMethodName()); + FileSystem fs = getFileSystem(); + ContractTestUtils.assertHasPathCapabilities(fs, dest, ABORTABLE_STREAM); + FSDataOutputStream stream = fs.create(dest, true); + byte[] data 
= ContractTestUtils.dataset(16, 'a', 26); + try { + ContractTestUtils.assertCapabilities(stream, + new String[]{ABORTABLE_STREAM}, + null); + stream.write(data); + assertCompleteAbort(stream.abort()); + // second attempt is harmless + assertNoopAbort(stream.abort()); + + // the path should not exist + ContractTestUtils.assertPathsDoNotExist(fs, "aborted file", dest); + } finally { + IOUtils.closeStream(stream); + // check the path doesn't exist "after" closing stream + ContractTestUtils.assertPathsDoNotExist(fs, "aborted file", dest); + } + // and it can be called on the stream after being closed. + assertNoopAbort(stream.abort()); + } + + /** + * A stream which was abort()ed after being close()d for a + * successful write will return indicating nothing happened. + */ + @Test + public void testAbortAfterCloseIsHarmless() throws Throwable { + describe("Verify abort on a closed stream is harmless " + + "and that the result indicates that nothing happened"); + Path dest = path(getMethodName()); + FileSystem fs = getFileSystem(); + byte[] data = ContractTestUtils.dataset(16, 'a', 26); + try (FSDataOutputStream stream = fs.create(dest, true)) { + stream.write(data); + assertCompleteAbort(stream.abort()); + stream.close(); + assertNoopAbort(stream.abort()); + } + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java index 6be9003e4ec38..934ad29ed2572 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java @@ -62,8 +62,10 @@ public void testNoBucketProbing() throws Exception { Path root = new Path(uri); - expectUnknownStore( - () -> fs.getFileStatus(root)); + //See HADOOP-17323. + assertTrue("root path should always exist", fs.exists(root)); + assertTrue("getFileStatus on root should always return a directory", + fs.getFileStatus(root).isDirectory()); expectUnknownStore( () -> fs.listStatus(root)); @@ -75,7 +77,11 @@ public void testNoBucketProbing() throws Exception { // the exception must not be caught and marked down to an FNFE expectUnknownStore(() -> fs.exists(src)); - expectUnknownStore(() -> fs.isFile(src)); + // now that isFile() only does a HEAD, it will get a 404 without + // the no-such-bucket error. 
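Editor's note: the abort tests above rely on FSDataOutputStream exposing Abortable.abort() plus the ABORTABLE_STREAM capability. A hedged sketch of how an application might use the same capability; the bucket URI and path are placeholders, real credentials and an abort-capable connector are assumed.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import static org.apache.hadoop.fs.StreamCapabilities.ABORTABLE_STREAM;

/** Sketch: abort an in-progress S3A write so no object is created. */
public class AbortWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // placeholder bucket; requires credentials and the s3a connector
    FileSystem fs = FileSystem.get(new URI("s3a://example-bucket/"), conf);
    Path dest = new Path("/tmp/abort-demo.bin");
    FSDataOutputStream out = fs.create(dest, true);
    try {
      if (out.hasCapability(ABORTABLE_STREAM)) {
        out.write(new byte[1024]);
        out.abort();                 // discard the pending upload
        System.out.println("exists after abort? " + fs.exists(dest));
      } else {
        out.close();                 // connector without abort support
      }
    } finally {
      IOUtils.closeStream(out);      // harmless after abort()
    }
  }
}
```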
+ assertFalse("isFile(" + src + ")" + + " was expected to complete by returning false", + fs.isFile(src)); expectUnknownStore(() -> fs.isDirectory(src)); expectUnknownStore(() -> fs.mkdirs(src)); expectUnknownStore(() -> fs.delete(src)); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java index f086a08201cd7..852b49ac1e1fa 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java @@ -20,8 +20,13 @@ import java.io.IOException; import java.nio.file.AccessDeniedException; +import java.util.Arrays; +import java.util.Collection; +import org.assertj.core.api.Assertions; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -31,13 +36,26 @@ import org.apache.hadoop.io.IOUtils; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP; +import static org.apache.hadoop.fs.s3a.Constants.ETAG_CHECKSUM_ENABLED; +import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Concrete class that extends {@link AbstractTestS3AEncryption} * and tests SSE-C encryption. + * HEAD requests against SSE-C-encrypted data will fail if the wrong key + * is presented, so the tests are very brittle to S3Guard being on vs. off. + * Equally "vexing" has been the optimizations of getFileStatus(), wherein + * LIST comes before HEAD path + / */ +@RunWith(Parameterized.class) public class ITestS3AEncryptionSSEC extends AbstractTestS3AEncryption { private static final String SERVICE_AMAZON_S3_STATUS_CODE_403 @@ -52,18 +70,67 @@ public class ITestS3AEncryptionSSEC extends AbstractTestS3AEncryption { = "msdo3VvvZznp66Gth58a91Hxe/UpExMkwU9BHkIjfW8="; private static final int TEST_FILE_LEN = 2048; + /** + * Parameterization. + */ + @Parameterized.Parameters(name = "{0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {"raw-keep-markers", false, true}, + {"raw-delete-markers", false, false}, + {"guarded-keep-markers", true, true}, + {"guarded-delete-markers", true, false} + }); + } + + /** + * Parameter: should the stores be guarded? + */ + private final boolean s3guard; + + /** + * Parameter: should directory markers be retained? + */ + private final boolean keepMarkers; + /** * Filesystem created with a different key. 
*/ - private FileSystem fsKeyB; + private S3AFileSystem fsKeyB; + + public ITestS3AEncryptionSSEC(final String name, + final boolean s3guard, + final boolean keepMarkers) { + this.s3guard = s3guard; + this.keepMarkers = keepMarkers; + } @Override protected Configuration createConfiguration() { Configuration conf = super.createConfiguration(); disableFilesystemCaching(conf); - conf.set(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM, + String bucketName = getTestBucketName(conf); + removeBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL); + if (!s3guard) { + // in a raw run remove all s3guard settings + removeBaseAndBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL); + } + // directory marker options + removeBaseAndBucketOverrides(bucketName, conf, + DIRECTORY_MARKER_POLICY, + ETAG_CHECKSUM_ENABLED, + SERVER_SIDE_ENCRYPTION_ALGORITHM, + SERVER_SIDE_ENCRYPTION_KEY); + conf.set(DIRECTORY_MARKER_POLICY, + keepMarkers + ? DIRECTORY_MARKER_POLICY_KEEP + : DIRECTORY_MARKER_POLICY_DELETE); + conf.set(SERVER_SIDE_ENCRYPTION_ALGORITHM, getSSEAlgorithm().getMethod()); - conf.set(Constants.SERVER_SIDE_ENCRYPTION_KEY, KEY_1); + conf.set(SERVER_SIDE_ENCRYPTION_KEY, KEY_1); + conf.setBoolean(ETAG_CHECKSUM_ENABLED, true); return conf; } @@ -109,31 +176,19 @@ public void testCreateFileAndReadWithDifferentEncryptionKey() throws } /** - * While each object has its own key and should be distinct, this verifies - * that hadoop treats object keys as a filesystem path. So if a top level - * dir is encrypted with keyA, a sublevel dir cannot be accessed with a - * different keyB. - * - * This is expected AWS S3 SSE-C behavior. * + * You can use a different key under a sub directory, even if you + * do not have permissions to read the marker. * @throws Exception */ @Test public void testCreateSubdirWithDifferentKey() throws Exception { - requireUnguardedFilesystem(); - - intercept(AccessDeniedException.class, - SERVICE_AMAZON_S3_STATUS_CODE_403, - () -> { - Path base = path("testCreateSubdirWithDifferentKey"); - Path nestedDirectory = new Path(base, "nestedDir"); - fsKeyB = createNewFileSystemWithSSECKey( - KEY_2); - getFileSystem().mkdirs(base); - fsKeyB.mkdirs(nestedDirectory); - // expected to fail - return fsKeyB.getFileStatus(nestedDirectory); - }); + Path base = path("testCreateSubdirWithDifferentKey"); + Path nestedDirectory = new Path(base, "nestedDir"); + fsKeyB = createNewFileSystemWithSSECKey( + KEY_2); + getFileSystem().mkdirs(base); + fsKeyB.mkdirs(nestedDirectory); } /** @@ -176,14 +231,11 @@ public void testRenameFile() throws Exception { } /** - * It is possible to list the contents of a directory up to the actual - * end of the nested directories. This is due to how S3A mocks the - * directories and how prefixes work in S3. + * Directory listings always work. 
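Editor's note: the parameterized configuration above combines an SSE-C key with a directory-marker policy. A small sketch of building such a configuration outside the test framework; the key value is a placeholder, and the constant and enum names are taken from the imports in the patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.S3AEncryptionMethods;

import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP;
import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM;
import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY;

/** Sketch: one SSE-C key plus a marker policy, per client configuration. */
public final class SseCConfigSketch {

  private SseCConfigSketch() {
  }

  public static Configuration withSseCKey(String base64Key,
      boolean keepMarkers) {
    Configuration conf = new Configuration();
    conf.set(SERVER_SIDE_ENCRYPTION_ALGORITHM,
        S3AEncryptionMethods.SSE_C.getMethod());
    conf.set(SERVER_SIDE_ENCRYPTION_KEY, base64Key);
    conf.set(DIRECTORY_MARKER_POLICY, keepMarkers
        ? DIRECTORY_MARKER_POLICY_KEEP
        : DIRECTORY_MARKER_POLICY_DELETE);
    return conf;
  }

  public static void main(String[] args) {
    // placeholder key: supply your own base64-encoded 256-bit key
    Configuration keyA = withSseCKey("...base64-key-A...", true);
    System.out.println(keyA.get(SERVER_SIDE_ENCRYPTION_ALGORITHM));
  }
}
```

Two filesystem instances built from configurations with different keys are what the test uses to probe which operations still require the matching key.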
* @throws Exception */ @Test public void testListEncryptedDir() throws Exception { - requireUnguardedFilesystem(); Path pathABC = path("testListEncryptedDir/a/b/c/"); Path pathAB = pathABC.getParent(); @@ -196,17 +248,11 @@ public void testListEncryptedDir() throws Exception { fsKeyB.listFiles(pathA, true); fsKeyB.listFiles(pathAB, true); - - //Until this point, no exception is thrown about access - intercept(AccessDeniedException.class, - SERVICE_AMAZON_S3_STATUS_CODE_403, - () -> { - fsKeyB.listFiles(pathABC, false); - }); + fsKeyB.listFiles(pathABC, false); Configuration conf = this.createConfiguration(); - conf.unset(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM); - conf.unset(Constants.SERVER_SIDE_ENCRYPTION_KEY); + conf.unset(SERVER_SIDE_ENCRYPTION_ALGORITHM); + conf.unset(SERVER_SIDE_ENCRYPTION_KEY); S3AContract contract = (S3AContract) createContract(conf); contract.init(); @@ -215,20 +261,14 @@ public void testListEncryptedDir() throws Exception { //unencrypted can access until the final directory unencryptedFileSystem.listFiles(pathA, true); unencryptedFileSystem.listFiles(pathAB, true); - AWSBadRequestException ex = intercept(AWSBadRequestException.class, - () -> { - unencryptedFileSystem.listFiles(pathABC, false); - }); + unencryptedFileSystem.listFiles(pathABC, false); } /** - * Much like the above list encrypted directory test, you cannot get the - * metadata of an object without the correct encryption key. - * @throws Exception + * listStatus also works with encrypted directories and key mismatch. */ @Test public void testListStatusEncryptedDir() throws Exception { - requireUnguardedFilesystem(); Path pathABC = path("testListStatusEncryptedDir/a/b/c/"); Path pathAB = pathABC.getParent(); @@ -240,17 +280,14 @@ public void testListStatusEncryptedDir() throws Exception { fsKeyB.listStatus(pathA); fsKeyB.listStatus(pathAB); - //Until this point, no exception is thrown about access - intercept(AccessDeniedException.class, - SERVICE_AMAZON_S3_STATUS_CODE_403, - () -> { - fsKeyB.listStatus(pathABC); - }); + // this used to raise 403, but with LIST before HEAD, + // no longer true. + fsKeyB.listStatus(pathABC); //Now try it with an unencrypted filesystem. Configuration conf = createConfiguration(); - conf.unset(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM); - conf.unset(Constants.SERVER_SIDE_ENCRYPTION_KEY); + conf.unset(SERVER_SIDE_ENCRYPTION_ALGORITHM); + conf.unset(SERVER_SIDE_ENCRYPTION_KEY); S3AContract contract = (S3AContract) createContract(conf); contract.init(); @@ -259,21 +296,15 @@ public void testListStatusEncryptedDir() throws Exception { //unencrypted can access until the final directory unencryptedFileSystem.listStatus(pathA); unencryptedFileSystem.listStatus(pathAB); - - intercept(AWSBadRequestException.class, - () -> { - unencryptedFileSystem.listStatus(pathABC); - }); + unencryptedFileSystem.listStatus(pathABC); } /** - * Much like trying to access a encrypted directory, an encrypted file cannot - * have its metadata read, since both are technically an object. + * An encrypted file cannot have its metadata read. 
* @throws Exception */ @Test public void testListStatusEncryptedFile() throws Exception { - requireUnguardedFilesystem(); Path pathABC = path("testListStatusEncryptedFile/a/b/c/"); assertTrue("mkdirs failed", getFileSystem().mkdirs(pathABC)); @@ -283,23 +314,25 @@ public void testListStatusEncryptedFile() throws Exception { fsKeyB = createNewFileSystemWithSSECKey(KEY_4); //Until this point, no exception is thrown about access - intercept(AccessDeniedException.class, - SERVICE_AMAZON_S3_STATUS_CODE_403, - () -> { - fsKeyB.listStatus(fileToStat); - }); + if (statusProbesCheckS3(fsKeyB, fileToStat)) { + intercept(AccessDeniedException.class, + SERVICE_AMAZON_S3_STATUS_CODE_403, + () -> fsKeyB.listStatus(fileToStat)); + } else { + fsKeyB.listStatus(fileToStat); + } } /** - * Skip the test case if S3Guard is enabled; generally this is because - * list and GetFileStatus calls can succeed even with different keys. + * Do file status probes check S3? + * @param fs filesystem + * @param path file path + * @return true if check for a path being a file will issue a HEAD request. */ - protected void requireUnguardedFilesystem() { - assume("Filesystem has a metastore", - !getFileSystem().hasMetadataStore()); + private boolean statusProbesCheckS3(S3AFileSystem fs, Path path) { + return !fs.hasMetadataStore() || !fs.allowAuthoritative(path); } - /** * It is possible to delete directories without the proper encryption key and * the hierarchy above it. @@ -308,7 +341,7 @@ protected void requireUnguardedFilesystem() { */ @Test public void testDeleteEncryptedObjectWithDifferentKey() throws Exception { - requireUnguardedFilesystem(); + //requireUnguardedFilesystem(); Path pathABC = path("testDeleteEncryptedObjectWithDifferentKey/a/b/c/"); Path pathAB = pathABC.getParent(); @@ -317,12 +350,13 @@ public void testDeleteEncryptedObjectWithDifferentKey() throws Exception { Path fileToDelete = new Path(pathABC, "filetobedeleted.txt"); writeThenReadFile(fileToDelete, TEST_FILE_LEN); fsKeyB = createNewFileSystemWithSSECKey(KEY_4); - intercept(AccessDeniedException.class, - SERVICE_AMAZON_S3_STATUS_CODE_403, - () -> { - fsKeyB.delete(fileToDelete, false); - }); - + if (statusProbesCheckS3(fsKeyB, fileToDelete)) { + intercept(AccessDeniedException.class, + SERVICE_AMAZON_S3_STATUS_CODE_403, + () -> fsKeyB.delete(fileToDelete, false)); + } else { + fsKeyB.delete(fileToDelete, false); + } //This is possible fsKeyB.delete(pathABC, true); fsKeyB.delete(pathAB, true); @@ -330,15 +364,33 @@ public void testDeleteEncryptedObjectWithDifferentKey() throws Exception { assertPathDoesNotExist("expected recursive delete", fileToDelete); } - private FileSystem createNewFileSystemWithSSECKey(String sseCKey) throws + /** + * getFileChecksum always goes to S3, so when + * the caller lacks permissions, it fails irrespective + * of guard. 
+ */ + @Test + public void testChecksumRequiresReadAccess() throws Throwable { + Path path = path("tagged-file"); + S3AFileSystem fs = getFileSystem(); + touch(fs, path); + Assertions.assertThat(fs.getFileChecksum(path)) + .isNotNull(); + fsKeyB = createNewFileSystemWithSSECKey(KEY_4); + intercept(AccessDeniedException.class, + SERVICE_AMAZON_S3_STATUS_CODE_403, + () -> fsKeyB.getFileChecksum(path)); + } + + private S3AFileSystem createNewFileSystemWithSSECKey(String sseCKey) throws IOException { Configuration conf = this.createConfiguration(); - conf.set(Constants.SERVER_SIDE_ENCRYPTION_KEY, sseCKey); + conf.set(SERVER_SIDE_ENCRYPTION_KEY, sseCKey); S3AContract contract = (S3AContract) createContract(conf); contract.init(); FileSystem fileSystem = contract.getTestFileSystem(); - return fileSystem; + return (S3AFileSystem) fileSystem; } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java index 5b807c2b07d71..c5ef65ff927ac 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java @@ -35,6 +35,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; +import static org.apache.hadoop.fs.s3a.EncryptionTestUtils.AWS_KMS_SSE_ALGORITHM; import static org.apache.hadoop.fs.s3a.S3AEncryptionMethods.SSE_KMS; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; @@ -107,6 +108,23 @@ public void testEncryptionSettingPropagation() throws Throwable { public void testEncryption() throws Throwable { } + /** + * Skipping if the test bucket is not configured with + * aws:kms encryption algorithm. 
+ */ + @Override + public void testEncryptionOverRename() throws Throwable { + S3AFileSystem fs = getFileSystem(); + Path path = path(getMethodName() + "find-encryption-algo"); + ContractTestUtils.touch(fs, path); + String sseAlgorithm = fs.getObjectMetadata(path).getSSEAlgorithm(); + if(StringUtils.isBlank(sseAlgorithm) || + !sseAlgorithm.equals(AWS_KMS_SSE_ALGORITHM)) { + skip("Test bucket is not configured with " + AWS_KMS_SSE_ALGORITHM); + } + super.testEncryptionOverRename(); + } + @Test public void testEncryptionOverRename2() throws Throwable { S3AFileSystem fs = getFileSystem(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java index 83deb113dcad1..c0feb85de1584 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java @@ -20,7 +20,8 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.assertj.core.api.Assertions; import org.junit.Assume; import org.apache.hadoop.conf.Configuration; @@ -141,13 +142,14 @@ public void testMultiObjectDeleteNoPermissions() throws Throwable { Path markerPath = fs.keyToQualifiedPath(marker); keys.add(new DeleteObjectsRequest.KeyVersion(marker)); - Pair, List> pair = + Pair, List> pair = new MultiObjectDeleteSupport(fs.createStoreContext(), null) .splitUndeletedKeys(ex, keys); - assertEquals(undeleted, pair.getLeft()); - List right = pair.getRight(); - assertEquals("Wrong size for " + join(right), 1, right.size()); - assertEquals(markerPath, right.get(0)); + assertEquals(undeleted, toPathList(pair.getLeft())); + List right = pair.getRight(); + Assertions.assertThat(right) + .hasSize(1); + assertEquals(markerPath, right.get(0).getPath()); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java index e2f7fead466a3..ca8e49cc33be2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java @@ -18,18 +18,18 @@ package org.apache.hadoop.fs.s3a; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; + +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; +import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest; + -import org.assertj.core.api.Assertions; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,26 +39,21 @@ import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; -import java.util.UUID; -import java.util.concurrent.Callable; + import static org.apache.hadoop.fs.contract.ContractTestUtils.*; -import static 
org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; import static org.apache.hadoop.fs.s3a.Statistic.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.*; import static org.apache.hadoop.test.GenericTestUtils.getTestDir; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** - * Use metrics to assert about the cost of file status queries. - * {@link S3AFileSystem#getFileStatus(Path)}. - * Parameterized on guarded vs raw. + * Use metrics to assert about the cost of file API calls. + * Parameterized on guarded vs raw. and directory marker keep vs delete */ @RunWith(Parameterized.class) -public class ITestS3AFileOperationCost extends AbstractS3ATestBase { - - private MetricDiff metadataRequests; - private MetricDiff listRequests; +public class ITestS3AFileOperationCost extends AbstractS3ACostTest { private static final Logger LOG = LoggerFactory.getLogger(ITestS3AFileOperationCost.class); @@ -69,154 +64,252 @@ public class ITestS3AFileOperationCost extends AbstractS3ATestBase { @Parameterized.Parameters(name = "{0}") public static Collection params() { return Arrays.asList(new Object[][]{ - {"raw", false}, - {"guarded", true} + {"raw-keep-markers", false, true, false}, + {"raw-delete-markers", false, false, false}, + {"nonauth-keep-markers", true, true, false}, + {"auth-delete-markers", true, false, true} }); } - private final String name; + public ITestS3AFileOperationCost(final String name, + final boolean s3guard, + final boolean keepMarkers, + final boolean authoritative) { + super(s3guard, keepMarkers, authoritative); + } - private final boolean s3guard; + /** + * Test the cost of {@code listLocatedStatus(file)}. + * There's a minor inefficiency in that calling this on + * a file in S3Guard still executes a LIST call, even + * though the file record is in the store. 
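Editor's note: verifyMetrics(), whenRaw()/whenAuthoritative()/whenNonauth() and the OperationCost constants all come from the AbstractS3ACostTest machinery this patch introduces, so they are not a public API. The composition idea is simple, though: an expected cost is a (HEAD count, LIST count) pair and plus() adds them. A toy stand-in follows, with illustrative numbers rather than the patch's real constants.

```java
/** Toy stand-in for the patch's OperationCost: HEAD and LIST counts compose. */
final class Cost {
  final int head;
  final int list;

  Cost(int head, int list) {
    this.head = head;
    this.list = list;
  }

  Cost plus(Cost other) {
    return new Cost(head + other.head, list + other.list);
  }

  @Override
  public String toString() {
    return "HEAD=" + head + ", LIST=" + list;
  }
}

public class CostCompositionSketch {
  public static void main(String[] args) {
    Cost fileStatusFileProbe = new Cost(1, 0);    // illustrative: one HEAD
    Cost listLocatedStatusList = new Cost(0, 1);  // illustrative: one LIST
    // Raw (unguarded) listLocatedStatus(file): the status probe plus the
    // listing itself, which is how the expectations above are built.
    System.out.println("raw cost: "
        + fileStatusFileProbe.plus(listLocatedStatusList));
  }
}
```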
+ */ + @Test + public void testCostOfLocatedFileStatusOnFile() throws Throwable { + describe("performing listLocatedStatus on a file"); + Path file = file(methodPath()); + S3AFileSystem fs = getFileSystem(); + verifyMetrics(() -> fs.listLocatedStatus(file), + whenRaw(FILE_STATUS_FILE_PROBE + .plus(LIST_LOCATED_STATUS_LIST_OP)), + whenAuthoritative(LIST_LOCATED_STATUS_LIST_OP), + whenNonauth(LIST_LOCATED_STATUS_LIST_OP + .plus(S3GUARD_NONAUTH_FILE_STATUS_PROBE))); + } - public ITestS3AFileOperationCost(final String name, final boolean s3guard) { - this.name = name; - this.s3guard = s3guard; + @Test + public void testCostOfListLocatedStatusOnEmptyDir() throws Throwable { + describe("performing listLocatedStatus on an empty dir"); + Path dir = dir(methodPath()); + S3AFileSystem fs = getFileSystem(); + verifyMetrics(() -> + fs.listLocatedStatus(dir), + whenRaw(LIST_LOCATED_STATUS_LIST_OP + .plus(GET_FILE_STATUS_ON_EMPTY_DIR)), + whenAuthoritative(NO_IO), + whenNonauth(LIST_LOCATED_STATUS_LIST_OP)); } - @Override - public Configuration createConfiguration() { - Configuration conf = super.createConfiguration(); - String bucketName = getTestBucketName(conf); - removeBucketOverrides(bucketName, conf, - S3_METADATA_STORE_IMPL); - if (!s3guard) { - // in a raw run remove all s3guard settings - removeBaseAndBucketOverrides(bucketName, conf, - S3_METADATA_STORE_IMPL); - } - disableFilesystemCaching(conf); - return conf; + @Test + public void testCostOfListLocatedStatusOnNonEmptyDir() throws Throwable { + describe("performing listLocatedStatus on a non empty dir"); + Path dir = dir(methodPath()); + S3AFileSystem fs = getFileSystem(); + Path file = file(new Path(dir, "file.txt")); + verifyMetrics(() -> + fs.listLocatedStatus(dir), + whenRaw(LIST_LOCATED_STATUS_LIST_OP), + whenAuthoritative(NO_IO), + whenNonauth(LIST_LOCATED_STATUS_LIST_OP)); } - @Override - public void setup() throws Exception { - super.setup(); - if (s3guard) { - // s3guard is required for those test runs where any of the - // guard options are set - assumeS3GuardState(true, getConfiguration()); - } + + @Test + public void testCostOfListFilesOnFile() throws Throwable { + describe("Performing listFiles() on a file"); + Path file = path(getMethodName() + ".txt"); S3AFileSystem fs = getFileSystem(); - metadataRequests = new MetricDiff(fs, OBJECT_METADATA_REQUESTS); - listRequests = new MetricDiff(fs, OBJECT_LIST_REQUESTS); - skipDuringFaultInjection(fs); + touch(fs, file); + verifyMetrics(() -> + fs.listFiles(file, true), + whenRaw(LIST_LOCATED_STATUS_LIST_OP + .plus(GET_FILE_STATUS_ON_FILE)), + whenAuthoritative(NO_IO), + whenNonauth(LIST_LOCATED_STATUS_LIST_OP)); } @Test - public void testCostOfGetFileStatusOnFile() throws Throwable { - describe("performing getFileStatus on a file"); - Path simpleFile = path("simple.txt"); + public void testCostOfListFilesOnEmptyDir() throws Throwable { + describe("Perpforming listFiles() on an empty dir with marker"); + // this attem + Path dir = path(getMethodName()); S3AFileSystem fs = getFileSystem(); - touch(fs, simpleFile); - resetMetricDiffs(); - FileStatus status = fs.getFileStatus(simpleFile); - assertTrue("not a file: " + status, status.isFile()); - if (!fs.hasMetadataStore()) { - metadataRequests.assertDiffEquals(1); - } - listRequests.assertDiffEquals(0); + fs.mkdirs(dir); + verifyMetrics(() -> + fs.listFiles(dir, true), + whenRaw(LIST_FILES_LIST_OP + .plus(GET_FILE_STATUS_ON_EMPTY_DIR)), + whenAuthoritative(NO_IO), + whenNonauth(LIST_FILES_LIST_OP)); } - private void resetMetricDiffs() { - 
reset(metadataRequests, listRequests); + @Test + public void testCostOfListFilesOnNonEmptyDir() throws Throwable { + describe("Performing listFiles() on a non empty dir"); + Path dir = path(getMethodName()); + S3AFileSystem fs = getFileSystem(); + fs.mkdirs(dir); + Path file = new Path(dir, "file.txt"); + touch(fs, file); + verifyMetrics(() -> + fs.listFiles(dir, true), + whenRaw(LIST_FILES_LIST_OP), + whenAuthoritative(NO_IO), + whenNonauth(LIST_FILES_LIST_OP)); } - /** - * Verify that the head and list calls match expectations, - * then reset the counters ready for the next operation. - * @param head expected HEAD count - * @param list expected LIST count - */ - private void verifyOperationCount(int head, int list) { - metadataRequests.assertDiffEquals(head); - listRequests.assertDiffEquals(list); - metadataRequests.reset(); - listRequests.reset(); + @Test + public void testCostOfListFilesOnNonExistingDir() throws Throwable { + describe("Performing listFiles() on a non existing dir"); + Path dir = path(getMethodName()); + S3AFileSystem fs = getFileSystem(); + verifyMetricsIntercepting(FileNotFoundException.class, "", + () -> fs.listFiles(dir, true), + whenRaw(LIST_FILES_LIST_OP + .plus(GET_FILE_STATUS_FNFE))); } @Test - public void testCostOfGetFileStatusOnEmptyDir() throws Throwable { - describe("performing getFileStatus on an empty directory"); + public void testCostOfListStatusOnFile() throws Throwable { + describe("Performing listStatus() on a file"); + Path file = path(getMethodName() + ".txt"); + S3AFileSystem fs = getFileSystem(); + touch(fs, file); + verifyMetrics(() -> + fs.listStatus(file), + whenRaw(LIST_STATUS_LIST_OP + .plus(GET_FILE_STATUS_ON_FILE)), + whenAuthoritative(LIST_STATUS_LIST_OP), + whenNonauth(LIST_STATUS_LIST_OP + .plus(S3GUARD_NONAUTH_FILE_STATUS_PROBE))); + } + + @Test + public void testCostOfListStatusOnEmptyDir() throws Throwable { + describe("Performing listStatus() on an empty dir"); + Path dir = path(getMethodName()); S3AFileSystem fs = getFileSystem(); - Path dir = path("empty"); fs.mkdirs(dir); - resetMetricDiffs(); - S3AFileStatus status = fs.innerGetFileStatus(dir, true, - StatusProbeEnum.ALL); - assertSame("not empty: " + status, Tristate.TRUE, - status.isEmptyDirectory()); + verifyMetrics(() -> + fs.listStatus(dir), + whenRaw(LIST_STATUS_LIST_OP + .plus(GET_FILE_STATUS_ON_EMPTY_DIR)), + whenAuthoritative(NO_IO), + whenNonauth(LIST_STATUS_LIST_OP)); + } - if (!fs.hasMetadataStore()) { - metadataRequests.assertDiffEquals(2); - } - listRequests.assertDiffEquals(0); + @Test + public void testCostOfListStatusOnNonEmptyDir() throws Throwable { + describe("Performing listStatus() on a non empty dir"); + Path dir = path(getMethodName()); + S3AFileSystem fs = getFileSystem(); + fs.mkdirs(dir); + Path file = new Path(dir, "file.txt"); + touch(fs, file); + verifyMetrics(() -> + fs.listStatus(dir), + whenRaw(LIST_STATUS_LIST_OP), + whenAuthoritative(NO_IO), + whenNonauth(LIST_STATUS_LIST_OP)); + } + @Test + public void testCostOfGetFileStatusOnFile() throws Throwable { + describe("performing getFileStatus on a file"); + Path simpleFile = file(methodPath()); + S3AFileStatus status = verifyRawInnerGetFileStatus(simpleFile, true, + StatusProbeEnum.ALL, + GET_FILE_STATUS_ON_FILE); + assertTrue("not a file: " + status, status.isFile()); + } + + @Test + public void testCostOfGetFileStatusOnEmptyDir() throws Throwable { + describe("performing getFileStatus on an empty directory"); + Path dir = dir(methodPath()); + S3AFileStatus status = 
verifyRawInnerGetFileStatus(dir, true, + StatusProbeEnum.ALL, + GET_FILE_STATUS_ON_DIR_MARKER); + assertSame("not empty: " + status, Tristate.TRUE, + status.isEmptyDirectory()); // but now only ask for the directories and the file check is skipped. - resetMetricDiffs(); - fs.innerGetFileStatus(dir, false, - StatusProbeEnum.DIRECTORIES); - if (!fs.hasMetadataStore()) { - metadataRequests.assertDiffEquals(1); - } + verifyRawInnerGetFileStatus(dir, false, + StatusProbeEnum.DIRECTORIES, + FILE_STATUS_DIR_PROBE); + + // now look at isFile/isDir against the same entry + isDir(dir, true, FILE_STATUS_DIR_PROBE); + isFile(dir, false, FILE_STATUS_FILE_PROBE); } @Test public void testCostOfGetFileStatusOnMissingFile() throws Throwable { describe("performing getFileStatus on a missing file"); - S3AFileSystem fs = getFileSystem(); - Path path = path("missing"); - resetMetricDiffs(); - intercept(FileNotFoundException.class, - () -> fs.getFileStatus(path)); - metadataRequests.assertDiffEquals(2); - listRequests.assertDiffEquals(1); + interceptRawGetFileStatusFNFE(methodPath(), false, + StatusProbeEnum.ALL, + GET_FILE_STATUS_FNFE); } @Test - public void testCostOfGetFileStatusOnMissingSubPath() throws Throwable { - describe("performing getFileStatus on a missing file"); - S3AFileSystem fs = getFileSystem(); - Path path = path("missingdir/missingpath"); - resetMetricDiffs(); - intercept(FileNotFoundException.class, - () -> fs.getFileStatus(path)); - metadataRequests.assertDiffEquals(2); - listRequests.assertDiffEquals(1); + public void testCostOfRootFileStatus() throws Throwable { + Path root = path("/"); + S3AFileStatus rootStatus = verifyRawInnerGetFileStatus( + root, + false, + StatusProbeEnum.ALL, + ROOT_FILE_STATUS_PROBE); + String rootStatusContent = rootStatus.toString(); + Assertions.assertThat(rootStatus.isDirectory()) + .describedAs("Status returned should be a directory " + + rootStatusContent) + .isEqualTo(true); + Assertions.assertThat(rootStatus.isEmptyDirectory()) + .isEqualTo(Tristate.UNKNOWN); + + rootStatus = verifyRawInnerGetFileStatus( + root, + true, + StatusProbeEnum.ALL, + FILE_STATUS_DIR_PROBE); + Assertions.assertThat(rootStatus.isDirectory()) + .describedAs("Status returned should be a directory " + + rootStatusContent) + .isEqualTo(true); + Assertions.assertThat(rootStatus.isEmptyDirectory()) + .isNotEqualByComparingTo(Tristate.UNKNOWN); + + } + + @Test + public void testIsDirIsFileMissingPath() throws Throwable { + describe("performing isDir and isFile on a missing file"); + Path path = methodPath(); + // now look at isFile/isDir against the same entry + isDir(path, false, + FILE_STATUS_DIR_PROBE); + isFile(path, false, + FILE_STATUS_FILE_PROBE); } @Test public void testCostOfGetFileStatusOnNonEmptyDir() throws Throwable { describe("performing getFileStatus on a non-empty directory"); - S3AFileSystem fs = getFileSystem(); - Path dir = path("empty"); - fs.mkdirs(dir); - Path simpleFile = new Path(dir, "simple.txt"); - touch(fs, simpleFile); - resetMetricDiffs(); - S3AFileStatus status = fs.innerGetFileStatus(dir, true, - StatusProbeEnum.ALL); - if (status.isEmptyDirectory() == Tristate.TRUE) { - // erroneous state - String fsState = fs.toString(); - fail("FileStatus says directory isempty: " + status - + "\n" + ContractTestUtils.ls(fs, dir) - + "\n" + fsState); - } - if (!fs.hasMetadataStore()) { - metadataRequests.assertDiffEquals(2); - listRequests.assertDiffEquals(1); - } + Path dir = dir(methodPath()); + file(new Path(dir, "simple.txt")); + S3AFileStatus status = 
verifyRawInnerGetFileStatus(dir, true, + StatusProbeEnum.ALL, + GET_FILE_STATUS_ON_DIR); + assertEmptyDirStatus(status, Tristate.FALSE); } - @Test public void testCostOfCopyFromLocalFile() throws Throwable { describe("testCostOfCopyFromLocalFile"); @@ -234,19 +327,18 @@ public void testCostOfCopyFromLocalFile() throws Throwable { byte[] data = dataset(len, 'A', 'Z'); writeDataset(localFS, localPath, data, len, 1024, true); S3AFileSystem s3a = getFileSystem(); - MetricDiff copyLocalOps = new MetricDiff(s3a, - INVOCATION_COPY_FROM_LOCAL_FILE); - MetricDiff putRequests = new MetricDiff(s3a, - OBJECT_PUT_REQUESTS); - MetricDiff putBytes = new MetricDiff(s3a, - OBJECT_PUT_BYTES); - - Path remotePath = path("copied"); - s3a.copyFromLocalFile(false, true, localPath, remotePath); + + + Path remotePath = methodPath(); + + verifyMetrics(() -> { + s3a.copyFromLocalFile(false, true, localPath, remotePath); + return "copy"; + }, + with(INVOCATION_COPY_FROM_LOCAL_FILE, 1), + with(OBJECT_PUT_REQUESTS, 1), + with(OBJECT_PUT_BYTES, len)); verifyFileContents(s3a, remotePath, data); - copyLocalOps.assertDiffEquals(1); - putRequests.assertDiffEquals(1); - putBytes.assertDiffEquals(len); // print final stats LOG.info("Filesystem {}", s3a); } finally { @@ -254,267 +346,164 @@ public void testCostOfCopyFromLocalFile() throws Throwable { } } - private boolean reset(MetricDiff... diffs) { - for (MetricDiff diff : diffs) { - diff.reset(); - } - return true; - } - - @Test - public void testFakeDirectoryDeletion() throws Throwable { - describe("Verify whether create file works after renaming a file. " - + "In S3, rename deletes any fake directories as a part of " - + "clean up activity"); - S3AFileSystem fs = getFileSystem(); - - Path srcBaseDir = path("src"); - mkdirs(srcBaseDir); - MetricDiff deleteRequests = - new MetricDiff(fs, Statistic.OBJECT_DELETE_REQUESTS); - MetricDiff directoriesDeleted = - new MetricDiff(fs, Statistic.DIRECTORIES_DELETED); - MetricDiff fakeDirectoriesDeleted = - new MetricDiff(fs, Statistic.FAKE_DIRECTORIES_DELETED); - MetricDiff directoriesCreated = - new MetricDiff(fs, Statistic.DIRECTORIES_CREATED); - - // when you call toString() on this, you get the stats - // so it gets auto-evaluated in log calls. 
- Object summary = new Object() { - @Override - public String toString() { - return String.format("[%s, %s, %s, %s]", - directoriesCreated, directoriesDeleted, - deleteRequests, fakeDirectoriesDeleted); - } - }; - - // reset operation to invoke - Callable reset = () -> - reset(deleteRequests, directoriesCreated, directoriesDeleted, - fakeDirectoriesDeleted); - - Path srcDir = new Path(srcBaseDir, "1/2/3/4/5/6"); - int srcDirDepth = directoriesInPath(srcDir); - // one dir created, one removed - mkdirs(srcDir); - String state = "after mkdir(srcDir) " + summary; - directoriesCreated.assertDiffEquals(state, 1); - deleteRequests.assertDiffEquals(state, 1); - directoriesDeleted.assertDiffEquals(state, 0); - // HADOOP-14255 deletes unnecessary fake directory objects in mkdirs() - fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth - 1); - reset.call(); - - // creating a file should trigger demise of the src dir - final Path srcFilePath = new Path(srcDir, "source.txt"); - touch(fs, srcFilePath); - state = "after touch(fs, srcFilePath) " + summary; - deleteRequests.assertDiffEquals(state, 1); - directoriesCreated.assertDiffEquals(state, 0); - directoriesDeleted.assertDiffEquals(state, 0); - fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth); - - reset.call(); - - // create a directory tree, expect the dir to be created and - // a request to delete all parent directories made. - Path destBaseDir = path("dest"); - Path destDir = new Path(destBaseDir, "1/2/3/4/5/6"); - Path destFilePath = new Path(destDir, "dest.txt"); - mkdirs(destDir); - state = "after mkdir(destDir) " + summary; - - int destDirDepth = directoriesInPath(destDir); - directoriesCreated.assertDiffEquals(state, 1); - deleteRequests.assertDiffEquals(state, 1); - directoriesDeleted.assertDiffEquals(state, 0); - fakeDirectoriesDeleted.assertDiffEquals(state, destDirDepth - 1); - - // create a new source file. - // Explicitly use a new path object to guarantee that the parent paths - // are different object instances - final Path srcFile2 = new Path(srcDir.toUri() + "/source2.txt"); - touch(fs, srcFile2); - - reset.call(); - - // rename the source file to the destination file. - // this tests the file rename path, not the dir rename path - // as srcFile2 exists, the parent dir of srcFilePath must not be created. - fs.rename(srcFilePath, destFilePath); - state = String.format("after rename(srcFilePath, destFilePath)" - + " %s dest dir depth=%d", - summary, - destDirDepth); - - directoriesCreated.assertDiffEquals(state, 0); - // one for the renamed file, one for the parent of the dest dir - deleteRequests.assertDiffEquals(state, 2); - directoriesDeleted.assertDiffEquals(state, 0); - fakeDirectoriesDeleted.assertDiffEquals(state, destDirDepth); - - // these asserts come after the checks on iop counts, so they don't - // interfere - assertIsFile(destFilePath); - assertIsDirectory(srcDir); - assertPathDoesNotExist("should have gone in the rename", srcFilePath); - reset.call(); - - // rename the source file2 to the (no longer existing - // this tests the file rename path, not the dir rename path - // as srcFile2 exists, the parent dir of srcFilePath must not be created. 
- fs.rename(srcFile2, srcFilePath); - state = String.format("after rename(%s, %s) %s dest dir depth=%d", - srcFile2, srcFilePath, - summary, - destDirDepth); - - // here we expect there to be no fake directories - directoriesCreated.assertDiffEquals(state, 0); - // one for the renamed file only - deleteRequests.assertDiffEquals(state, 1); - directoriesDeleted.assertDiffEquals(state, 0); - fakeDirectoriesDeleted.assertDiffEquals(state, 0); - } - - private int directoriesInPath(Path path) { - return path.isRoot() ? 0 : 1 + directoriesInPath(path.getParent()); - } - @Test - public void testCostOfRootRename() throws Throwable { - describe("assert that a root directory rename doesn't" - + " do much in terms of parent dir operations"); + public void testDirProbes() throws Throwable { + describe("Test directory probe cost"); + assumeUnguarded(); S3AFileSystem fs = getFileSystem(); + // Create the empty directory. + Path emptydir = dir(methodPath()); - // unique name, so that even when run in parallel tests, there's no conflict - String uuid = UUID.randomUUID().toString(); - Path src = new Path("/src-" + uuid); - Path dest = new Path("/dest-" + uuid); + // head probe fails + interceptRawGetFileStatusFNFE(emptydir, false, + StatusProbeEnum.HEAD_ONLY, + FILE_STATUS_FILE_PROBE); - try { - MetricDiff deleteRequests = - new MetricDiff(fs, Statistic.OBJECT_DELETE_REQUESTS); - MetricDiff directoriesDeleted = - new MetricDiff(fs, Statistic.DIRECTORIES_DELETED); - MetricDiff fakeDirectoriesDeleted = - new MetricDiff(fs, Statistic.FAKE_DIRECTORIES_DELETED); - MetricDiff directoriesCreated = - new MetricDiff(fs, Statistic.DIRECTORIES_CREATED); - touch(fs, src); - fs.rename(src, dest); - Object summary = new Object() { - @Override - public String toString() { - return String.format("[%s, %s, %s, %s]", - directoriesCreated, directoriesDeleted, - deleteRequests, fakeDirectoriesDeleted); - } - }; - - String state = String.format("after touch(%s) %s", - src, summary); - touch(fs, src); - fs.rename(src, dest); - directoriesCreated.assertDiffEquals(state, 0); - - - state = String.format("after rename(%s, %s) %s", - src, dest, summary); - // here we expect there to be no fake directories - directoriesCreated.assertDiffEquals(state, 0); - // one for the renamed file only - deleteRequests.assertDiffEquals(state, 1); - directoriesDeleted.assertDiffEquals(state, 0); - fakeDirectoriesDeleted.assertDiffEquals(state, 0); - - // delete that destination file, assert only the file delete was issued - reset(deleteRequests, directoriesCreated, directoriesDeleted, - fakeDirectoriesDeleted); - - fs.delete(dest, false); - // here we expect there to be no fake directories - directoriesCreated.assertDiffEquals(state, 0); - // one for the deleted file - deleteRequests.assertDiffEquals(state, 1); - directoriesDeleted.assertDiffEquals(state, 0); - fakeDirectoriesDeleted.assertDiffEquals(state, 0); - } finally { - fs.delete(src, false); - fs.delete(dest, false); - } - } + // a LIST will find it and declare as empty + S3AFileStatus status = verifyRawInnerGetFileStatus(emptydir, true, + StatusProbeEnum.LIST_ONLY, + FILE_STATUS_DIR_PROBE); + assertEmptyDirStatus(status, Tristate.TRUE); - @Test - public void testDirProbes() throws Throwable { - describe("Test directory probe cost -raw only"); - S3AFileSystem fs = getFileSystem(); - assume("Unguarded FS only", !fs.hasMetadataStore()); - String dir = "testEmptyDirHeadProbe"; - Path emptydir = path(dir); - // Create the empty directory. - fs.mkdirs(emptydir); - - // metrics and assertions. 
- resetMetricDiffs(); - - intercept(FileNotFoundException.class, () -> - fs.innerGetFileStatus(emptydir, false, - StatusProbeEnum.HEAD_ONLY)); - verifyOperationCount(1, 0); - - // a LIST will find it -but it doesn't consider it an empty dir. - S3AFileStatus status = fs.innerGetFileStatus(emptydir, true, - StatusProbeEnum.LIST_ONLY); - verifyOperationCount(0, 1); - Assertions.assertThat(status) - .describedAs("LIST output is not considered empty") - .matches(s -> !s.isEmptyDirectory().equals(Tristate.TRUE), "is empty"); - - // finally, skip all probes and expect no operations toThere are - // take place - intercept(FileNotFoundException.class, () -> - fs.innerGetFileStatus(emptydir, false, - EnumSet.noneOf(StatusProbeEnum.class))); - verifyOperationCount(0, 0); + // skip all probes and expect no operations to take place + interceptRawGetFileStatusFNFE(emptydir, false, + EnumSet.noneOf(StatusProbeEnum.class), + NO_IO); // now add a trailing slash to the key and use the // deep internal s3GetFileStatus method call. String emptyDirTrailingSlash = fs.pathToKey(emptydir.getParent()) - + "/" + dir + "/"; + + "/" + emptydir.getName() + "/"; // A HEAD request does not probe for keys with a trailing / - intercept(FileNotFoundException.class, () -> + interceptRaw(FileNotFoundException.class, "", + NO_IO, () -> fs.s3GetFileStatus(emptydir, emptyDirTrailingSlash, - StatusProbeEnum.HEAD_ONLY, null)); - verifyOperationCount(0, 0); + StatusProbeEnum.HEAD_ONLY, null, false)); // but ask for a directory marker and you get the entry - status = fs.s3GetFileStatus(emptydir, - emptyDirTrailingSlash, - StatusProbeEnum.DIR_MARKER_ONLY, null); - verifyOperationCount(1, 0); + status = verifyRaw(FILE_STATUS_DIR_PROBE, () -> + fs.s3GetFileStatus(emptydir, + emptyDirTrailingSlash, + StatusProbeEnum.LIST_ONLY, + null, + true)); assertEquals(emptydir, status.getPath()); + assertEmptyDirStatus(status, Tristate.TRUE); } + @Test + public void testNeedEmptyDirectoryProbeRequiresList() throws Throwable { + S3AFileSystem fs = getFileSystem(); + + intercept(IllegalArgumentException.class, "", () -> + fs.s3GetFileStatus(new Path("/something"), "/something", + StatusProbeEnum.HEAD_ONLY, null, true)); + } @Test public void testCreateCost() throws Throwable { describe("Test file creation cost -raw only"); + assumeUnguarded(); + Path testFile = methodPath(); + // when overwrite is false, the path is checked for existence. + create(testFile, false, + CREATE_FILE_NO_OVERWRITE); + // but when true: only the directory checks take place. + create(testFile, true, CREATE_FILE_OVERWRITE); + } + + @Test + public void testCreateCostFileExists() throws Throwable { + describe("Test cost of create file failing with existing file"); + assumeUnguarded(); + Path testFile = file(methodPath()); + + // now there is a file there, an attempt with overwrite == false will + // fail on the first HEAD. + interceptRaw(FileAlreadyExistsException.class, "", + FILE_STATUS_FILE_PROBE, + () -> file(testFile, false)); + } + + @Test + public void testCreateCostDirExists() throws Throwable { + describe("Test cost of create file failing with existing dir"); + assumeUnguarded(); + Path testFile = dir(methodPath()); + + // now there is a file there, an attempt with overwrite == false will + // fail on the first HEAD. + interceptRaw(FileAlreadyExistsException.class, "", + GET_FILE_STATUS_ON_DIR_MARKER, + () -> file(testFile, false)); + } + + /** + * Use the builder API. + * This always looks for a parent unless the caller says otherwise. 
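Editor's note: testCreateBuilder() exercises the FileSystem.createFile() builder, whose default is to probe for the parent directory unless recursive() is requested. A minimal sketch against the local filesystem; the path is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch: createFile() builder; recursive() skips the parent existence probe. */
public class CreateBuilderSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dest = new Path("/tmp/create-builder-demo.txt"); // placeholder path
    // Default builder behaviour checks that the parent directory exists,
    // which on S3A costs an extra probe; recursive() + overwrite(true)
    // trims that down to a check that the destination is not a directory.
    try (FSDataOutputStream out = fs.createFile(dest)
        .overwrite(true)
        .recursive()
        .build()) {
      out.writeUTF("hello");
    }
    System.out.println("created " + fs.getFileStatus(dest));
  }
}
```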
+ */ + @Test + public void testCreateBuilder() throws Throwable { + describe("Test builder file creation cost -raw only"); + assumeUnguarded(); + Path testFile = methodPath(); + dir(testFile.getParent()); + + // builder defaults to looking for parent existence (non-recursive) + buildFile(testFile, false, false, + GET_FILE_STATUS_FNFE // destination file + .plus(FILE_STATUS_DIR_PROBE)); // parent dir + // recursive = false and overwrite=true: + // only make sure the dest path isn't a directory. + buildFile(testFile, true, true, + FILE_STATUS_DIR_PROBE); + + // now there is a file there, an attempt with overwrite == false will + // fail on the first HEAD. + interceptRaw(FileAlreadyExistsException.class, "", + GET_FILE_STATUS_ON_FILE, + () -> buildFile(testFile, false, true, + GET_FILE_STATUS_ON_FILE)); + } + + @Test + public void testCostOfGlobStatus() throws Throwable { + describe("Test globStatus has expected cost"); S3AFileSystem fs = getFileSystem(); assume("Unguarded FS only", !fs.hasMetadataStore()); - resetMetricDiffs(); - Path testFile = path("testCreateCost"); - // when overwrite is false, the path is checked for existence. - try (FSDataOutputStream out = fs.create(testFile, false)) { - verifyOperationCount(2, 1); - } + Path basePath = path("testCostOfGlobStatus/nextFolder/"); - // but when true: only the directory checks take place. - try (FSDataOutputStream out = fs.create(testFile, true)) { - verifyOperationCount(1, 1); + // create a bunch of files + int filesToCreate = 10; + for (int i = 0; i < filesToCreate; i++) { + create(basePath.suffix("/" + i)); } + fs.globStatus(basePath.suffix("/*")); + // 2 head + 1 list from getFileStatus on path, + // plus 1 list to match the glob pattern + verifyRaw(LIST_STATUS_LIST_OP, + () -> fs.globStatus(basePath.suffix("/*"))); } + + @Test + public void testCostOfGlobStatusNoSymlinkResolution() throws Throwable { + describe("Test globStatus does not attempt to resolve symlinks"); + S3AFileSystem fs = getFileSystem(); + assume("Unguarded FS only", !fs.hasMetadataStore()); + + Path basePath = path("testCostOfGlobStatusNoSymlinkResolution/f/"); + + // create a single file, globStatus returning a single file on a pattern + // triggers attempts at symlinks resolution if configured + String fileName = "/notASymlinkDOntResolveMeLikeOne"; + create(basePath.suffix(fileName)); + // unguarded: 2 head + 1 list from getFileStatus on path, + // plus 1 list to match the glob pattern + // no additional operations from symlink resolution + verifyRaw(LIST_STATUS_LIST_OP, + () -> fs.globStatus(basePath.suffix("/*"))); + } + + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java index 46d6ffc85e03b..7ce7b8385cec4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.s3a; +import java.io.FileNotFoundException; + import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -25,21 +27,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; +import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assume.*; import static org.junit.Assert.*; /** * Tests a live S3 system. If your keys and bucket aren't specified, all tests * are marked as passed. - * - * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest from - * TestCase which uses the old Junit3 runner that doesn't ignore assumptions - * properly making it impossible to skip the tests if we don't have a valid - * bucket. - **/ + */ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest { protected static final Logger LOG = @@ -77,7 +77,7 @@ public Path getTestBaseDir() { @Test public void testMkdirsWithUmask() throws Exception { - // not supported + skip("Not supported"); } @Test @@ -103,8 +103,38 @@ public void testRenameDirectoryAsExistingDirectory() throws Exception { } @Test - public void testMoveDirUnderParent() throws Throwable { - // not support because - // Fails if dst is a directory that is not empty. + public void testRenameDirectoryAsExistingFile() throws Exception { + assumeTrue(renameSupported()); + + Path src = path("testRenameDirectoryAsExistingFile/dir"); + fs.mkdirs(src); + Path dst = path("testRenameDirectoryAsExistingFileNew/newfile"); + createFile(dst); + intercept(FileAlreadyExistsException.class, + () -> rename(src, dst, false, true, true)); + } + + @Test + public void testRenameDirectoryMoveToNonExistentDirectory() + throws Exception { + skip("does not fail"); + } + + @Test + public void testRenameFileMoveToNonExistentDirectory() throws Exception { + skip("does not fail"); + } + + @Test + public void testRenameFileAsExistingFile() throws Exception { + intercept(FileAlreadyExistsException.class, + () -> super.testRenameFileAsExistingFile()); + } + + @Test + public void testRenameNonExistentPath() throws Exception { + intercept(FileNotFoundException.class, + () -> super.testRenameNonExistentPath()); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java index 972c665438e9a..3bfe69c2bca91 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java @@ -21,11 +21,15 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.metrics2.lib.MutableCounterLong; + +import org.assertj.core.api.Assertions; import org.junit.Test; import java.io.IOException; import java.io.InputStream; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; + /** * Test s3a performance metrics register and output. 
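(Illustrative sketch, not taken from this patch: the IOStatistics assertion pattern these tests adopt, assuming a filesystem fs, a non-empty file at path, and that a single-byte read raises the stream_read_bytes counter by exactly one.)

    // assumes static imports of
    // IOStatisticAssertions.verifyStatisticCounterValue and
    // StreamStatisticNames.STREAM_READ_BYTES
    public void streamStatisticsSketch(FileSystem fs, Path path)
        throws IOException {
      IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot();
      try (FSDataInputStream in = fs.open(path)) {
        in.read();                                 // read one byte
        snapshot.aggregate(in.getIOStatistics());  // capture the stream stats
      }
      verifyStatisticCounterValue(snapshot, STREAM_READ_BYTES, 1);
    }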
*/ @@ -51,17 +55,34 @@ public void testStreamStatistics() throws IOException { Path file = path("testStreamStatistics"); byte[] data = "abcdefghijklmnopqrstuvwxyz".getBytes(); ContractTestUtils.createFile(fs, file, false, data); - - try (InputStream inputStream = fs.open(file)) { + InputStream inputStream = fs.open(file); + try { while (inputStream.read(data) != -1) { LOG.debug("Read batch of data from input stream..."); } + LOG.info("Final stream statistics: {}", + ioStatisticsSourceToString(inputStream)); + } finally { + // this is not try-with-resources only to aid debugging + inputStream.close(); } + final String statName = Statistic.STREAM_READ_BYTES.getSymbol(); + + final S3AInstrumentation instrumentation = fs.getInstrumentation(); + + final long counterValue = instrumentation.getCounterValue(statName); + + final int expectedBytesRead = 26; + Assertions.assertThat(counterValue) + .describedAs("Counter %s from instrumentation %s", + statName, instrumentation) + .isEqualTo(expectedBytesRead); MutableCounterLong read = (MutableCounterLong) - fs.getInstrumentation().getRegistry() - .get(Statistic.STREAM_SEEK_BYTES_READ.getSymbol()); - assertEquals("Stream statistics were not merged", 26, read.value()); + instrumentation.getRegistry() + .get(statName); + assertEquals("Stream statistics were not merged", expectedBytesRead, + read.value()); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java index d0d42b89f1027..e6ebfba922d5f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java @@ -29,6 +29,7 @@ import com.amazonaws.services.s3.model.GetBucketEncryptionResult; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectRequest; +import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; @@ -47,6 +48,7 @@ import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_ETAG; import static org.hamcrest.Matchers.nullValue; /** @@ -171,6 +173,9 @@ public void testEmptyFileChecksums() throws Throwable { assertNotEquals("file 1 checksum", 0, checksum1.getLength()); assertEquals("checksums of empty files", checksum1, fs.getFileChecksum(touchFile("file2"), 0)); + Assertions.assertThat(fs.getXAttr(file1, XA_ETAG)) + .describedAs("etag from xattr") + .isEqualTo(checksum1.getBytes()); } /** @@ -222,6 +227,9 @@ public void testNonEmptyFileChecksums() throws Throwable { createFile(fs, file4, true, "hello, world".getBytes(StandardCharsets.UTF_8)); assertNotEquals(checksum2, fs.getFileChecksum(file4, 0)); + Assertions.assertThat(fs.getXAttr(file3, XA_ETAG)) + .describedAs("etag from xattr") + .isEqualTo(checksum1.getBytes()); } /** @@ -276,8 +284,14 @@ public void testS3AToStringUnitialized() throws Throwable { } } + @Test + public void testS3AIOStatisticsUninitialized() throws Throwable { + try (S3AFileSystem fs = new S3AFileSystem()) { + fs.getIOStatistics(); + } + + } /** -<<<<<<< ours * Verify that paths with a trailing "/" are fixed up. 
*/ @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java index 3fd70be931997..adcf578b05862 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARemoteFileChanged.java @@ -33,7 +33,7 @@ import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.S3Object; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.junit.Assume; import org.junit.Test; import org.junit.runner.RunWith; @@ -272,7 +272,9 @@ public void setup() throws Exception { @Override public void teardown() throws Exception { // restore the s3 client so there's no mocking interfering with the teardown - originalS3Client.ifPresent(fs::setAmazonS3Client); + if (fs != null) { + originalS3Client.ifPresent(fs::setAmazonS3Client); + } super.teardown(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java index 2ba3fd7a65cde..3d7ee0882efa4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java @@ -21,13 +21,23 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; import org.apache.hadoop.io.IOUtils; +import org.assertj.core.api.Assertions; import org.junit.Test; import java.io.IOException; -import static org.apache.hadoop.fs.s3a.Statistic.STREAM_SEEK_BYTES_READ; +import static org.apache.hadoop.fs.s3a.Statistic.STREAM_READ_BYTES; +import static org.apache.hadoop.fs.s3a.Statistic.STREAM_READ_BYTES_READ_CLOSE; +import static org.apache.hadoop.fs.s3a.Statistic.STREAM_READ_TOTAL_BYTES; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatisticsSource; /** * Integration test for calling @@ -40,6 +50,8 @@ */ public class ITestS3AUnbuffer extends AbstractS3ATestBase { + public static final int FILE_LENGTH = 16; + private Path dest; @Override @@ -48,7 +60,7 @@ public void setup() throws Exception { dest = path("ITestS3AUnbuffer"); describe("ITestS3AUnbuffer"); - byte[] data = ContractTestUtils.dataset(16, 'a', 26); + byte[] data = ContractTestUtils.dataset(FILE_LENGTH, 'a', 26); ContractTestUtils.writeDataset(getFileSystem(), dest, data, data.length, 16, true); } @@ -57,13 +69,41 @@ public void setup() throws Exception { public void testUnbuffer() throws IOException { describe("testUnbuffer"); + IOStatisticsSnapshot iostats = new IOStatisticsSnapshot(); // Open file, read half the data, and then call unbuffer try (FSDataInputStream inputStream = getFileSystem().open(dest)) { assertTrue(inputStream.getWrappedStream() instanceof 
S3AInputStream); - readAndAssertBytesRead(inputStream, 8); + int bytesToRead = 8; + readAndAssertBytesRead(inputStream, bytesToRead); assertTrue(isObjectStreamOpen(inputStream)); + assertTrue("No IOstatistics from " + inputStream, + iostats.aggregate(inputStream.getIOStatistics())); + verifyStatisticCounterValue(iostats, + StreamStatisticNames.STREAM_READ_BYTES, + bytesToRead); + verifyStatisticCounterValue(iostats, + StoreStatisticNames.ACTION_HTTP_GET_REQUEST, + 1); + + // do the unbuffering inputStream.unbuffer(); + // audit the updated statistics + IOStatistics st2 = inputStream.getIOStatistics(); + + // the unbuffered operation must be tracked + verifyStatisticCounterValue(st2, + StreamStatisticNames.STREAM_READ_UNBUFFERED, + 1); + + // all other counter values consistent. + verifyStatisticCounterValue(st2, + StreamStatisticNames.STREAM_READ_BYTES, + bytesToRead); + verifyStatisticCounterValue(st2, + StoreStatisticNames.ACTION_HTTP_GET_REQUEST, + 1); + // Check the the wrapped stream is closed assertFalse(isObjectStreamOpen(inputStream)); } @@ -71,7 +111,7 @@ public void testUnbuffer() throws IOException { /** * Test that calling {@link S3AInputStream#unbuffer()} merges a stream's - * {@link org.apache.hadoop.fs.s3a.S3AInstrumentation.InputStreamStatistics} + * {@code InputStreamStatistics} * into the {@link S3AFileSystem}'s {@link S3AInstrumentation} instance. */ @Test @@ -79,36 +119,73 @@ public void testUnbufferStreamStatistics() throws IOException { describe("testUnbufferStreamStatistics"); // Validate bytesRead is updated correctly + S3AFileSystem fs = getFileSystem(); S3ATestUtils.MetricDiff bytesRead = new S3ATestUtils.MetricDiff( - getFileSystem(), STREAM_SEEK_BYTES_READ); + fs, STREAM_READ_BYTES); + S3ATestUtils.MetricDiff totalBytesRead = new S3ATestUtils.MetricDiff( + fs, STREAM_READ_TOTAL_BYTES); + S3ATestUtils.MetricDiff bytesReadInClose = new S3ATestUtils.MetricDiff( + fs, STREAM_READ_BYTES_READ_CLOSE); // Open file, read half the data, and then call unbuffer FSDataInputStream inputStream = null; + int firstBytesToRead = 8; + + int secondBytesToRead = 1; + long expectedFinalBytesRead; + long expectedTotalBytesRead; + + Object streamStatsStr; try { - inputStream = getFileSystem().open(dest); + inputStream = fs.open(dest); + streamStatsStr = demandStringifyIOStatisticsSource(inputStream); - readAndAssertBytesRead(inputStream, 8); + LOG.info("initial stream statistics {}", streamStatsStr); + readAndAssertBytesRead(inputStream, firstBytesToRead); + LOG.info("stream statistics after read {}", streamStatsStr); inputStream.unbuffer(); // Validate that calling unbuffer updates the input stream statistics - bytesRead.assertDiffEquals(8); + bytesRead.assertDiffEquals(firstBytesToRead); + final long bytesInUnbuffer = bytesReadInClose.diff(); + totalBytesRead.assertDiffEquals(firstBytesToRead + bytesInUnbuffer); // Validate that calling unbuffer twice in a row updates the statistics // correctly - readAndAssertBytesRead(inputStream, 4); + bytesReadInClose.reset(); + bytesRead.reset(); + readAndAssertBytesRead(inputStream, secondBytesToRead); inputStream.unbuffer(); - bytesRead.assertDiffEquals(12); + LOG.info("stream statistics after second read {}", streamStatsStr); + bytesRead.assertDiffEquals(secondBytesToRead); + final long bytesInClose = bytesReadInClose.diff(); + expectedFinalBytesRead = firstBytesToRead + secondBytesToRead; + expectedTotalBytesRead = expectedFinalBytesRead + + bytesInUnbuffer + bytesInClose; + + totalBytesRead.assertDiffEquals(expectedTotalBytesRead); } 
finally { + LOG.info("Closing stream"); IOUtils.closeStream(inputStream); } + LOG.info("stream statistics after close {}", streamStatsStr); // Validate that closing the file does not further change the statistics - bytesRead.assertDiffEquals(12); + totalBytesRead.assertDiffEquals(expectedTotalBytesRead); // Validate that the input stream stats are correct when the file is closed - assertEquals("S3AInputStream statistics were not updated properly", 12, - ((S3AInputStream) inputStream.getWrappedStream()) - .getS3AStreamStatistics().bytesRead); + S3AInputStreamStatistics streamStatistics = ((S3AInputStream) inputStream + .getWrappedStream()) + .getS3AStreamStatistics(); + Assertions.assertThat(streamStatistics) + .describedAs("Stream statistics %s", streamStatistics) + .hasFieldOrPropertyWithValue("bytesRead", + expectedFinalBytesRead) + .hasFieldOrPropertyWithValue("totalBytesRead", expectedTotalBytesRead); + assertEquals("S3AInputStream statistics were not updated properly in " + + streamStatsStr, + expectedFinalBytesRead, + streamStatistics.getBytesRead()); } private boolean isObjectStreamOpen(FSDataInputStream inputStream) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java index ab81491c4cf90..bd69ef28669bb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardEmptyDirs.java @@ -26,11 +26,13 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.ListObjectsV2Request; import com.amazonaws.services.s3.model.ListObjectsV2Result; +import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.S3ObjectSummary; import org.assertj.core.api.Assertions; import org.junit.Test; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.impl.StoreContext; @@ -57,6 +59,10 @@ */ public class ITestS3GuardEmptyDirs extends AbstractS3ATestBase { + /** + * Rename an empty directory, verify that the empty dir + * marker moves in both S3Guard and in the S3A FS. + */ @Test public void testRenameEmptyDir() throws Throwable { S3AFileSystem fs = getFileSystem(); @@ -67,7 +73,7 @@ public void testRenameEmptyDir() throws Throwable { String destDirMarker = fs.pathToKey(destDir) + "/"; // set things up. 
mkdirs(sourceDir); - // there'a source directory marker + // there's source directory marker fs.getObjectMetadata(sourceDirMarker); S3AFileStatus srcStatus = getEmptyDirStatus(sourceDir); assertEquals("Must be an empty dir: " + srcStatus, Tristate.TRUE, @@ -82,8 +88,12 @@ public void testRenameEmptyDir() throws Throwable { () -> getEmptyDirStatus(sourceDir)); // and verify that there's no dir marker hidden under a tombstone intercept(FileNotFoundException.class, - () -> Invoker.once("HEAD", sourceDirMarker, - () -> fs.getObjectMetadata(sourceDirMarker))); + () -> Invoker.once("HEAD", sourceDirMarker, () -> { + ObjectMetadata md = fs.getObjectMetadata(sourceDirMarker); + return String.format("Object %s of length %d", + sourceDirMarker, md.getInstanceLength()); + })); + // the parent dir mustn't be confused S3AFileStatus baseStatus = getEmptyDirStatus(basePath); assertEquals("Must not be an empty dir: " + baseStatus, Tristate.FALSE, @@ -215,7 +225,7 @@ public void testTombstonesAndEmptyDirectories() throws Throwable { // if DDB is the metastore, then we expect no FS requests to be made // at all. S3ATestUtils.MetricDiff listMetric = new S3ATestUtils.MetricDiff(fs, - Statistic.OBJECT_LIST_REQUESTS); + Statistic.OBJECT_LIST_REQUEST); S3ATestUtils.MetricDiff getMetric = new S3ATestUtils.MetricDiff(fs, Statistic.OBJECT_METADATA_REQUESTS); // do a getFile status with empty dir flag @@ -277,4 +287,27 @@ public int read() { s3.putObject(putObjectRequest); } + @Test + public void testDirMarkerDelete() throws Throwable { + S3AFileSystem fs = getFileSystem(); + assumeFilesystemHasMetadatastore(getFileSystem()); + Path baseDir = methodPath(); + Path subFile = new Path(baseDir, "subdir/file.txt"); + // adds the s3guard entry + fs.mkdirs(baseDir); + touch(fs, subFile); + // PUT a marker + createEmptyObject(fs, fs.pathToKey(baseDir) + "/"); + fs.delete(baseDir, true); + assertPathDoesNotExist("Should have been deleted", baseDir); + + // now create the dir again + fs.mkdirs(baseDir); + FileStatus fileStatus = fs.getFileStatus(baseDir); + Assertions.assertThat(fileStatus) + .matches(FileStatus::isDirectory, "Not a directory"); + Assertions.assertThat(fs.listStatus(baseDir)) + .describedAs("listing of %s", baseDir) + .isEmpty(); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java index 6e55796fd3ae5..09f66df4c2ec0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardListConsistency.java @@ -28,9 +28,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.s3a.S3AContract; -import com.google.common.collect.Lists; +import com.amazonaws.services.s3.model.S3ObjectSummary; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; @@ -194,7 +196,7 @@ public void testRollingRenames() throws Exception { } S3AFileSystem fs = getFileSystem(); - assertFalse("Renaming deleted file should have failed", + intercept(FileNotFoundException.class, () -> fs.rename(dir2[0], dir1[0])); assertTrue("Renaming over existing file should have succeeded", 
fs.rename(dir1[0], dir0[0])); @@ -271,7 +273,10 @@ public void testConsistentRenameAfterDelete() throws Exception { assertTrue(fs.delete(testDirs[1], false)); assertTrue(fs.delete(testDirs[2], false)); - fs.rename(path("a"), path("a3")); + ContractTestUtils.rename(fs, path("a"), path("a3")); + ContractTestUtils.assertPathsDoNotExist(fs, + "Source paths shouldn't exist post rename operation", + testDirs[0], testDirs[1], testDirs[2]); FileStatus[] paths = fs.listStatus(path("a3/b")); List list = new ArrayList<>(); for (FileStatus fileState : paths) { @@ -556,24 +561,23 @@ public void testInconsistentS3ClientDeletes() throws Throwable { + " paths"); ListObjectsV2Result postDeleteDelimited = listObjectsV2(fs, key, "/"); - assertListSizeEqual( + boolean stripTombstones = false; + assertObjectSummariesEqual( "InconsistentAmazonS3Client added back objects incorrectly " + "in a non-recursive listing", - preDeleteDelimited.getObjectSummaries(), - postDeleteDelimited.getObjectSummaries()); + preDeleteDelimited, postDeleteDelimited, + stripTombstones); assertListSizeEqual("InconsistentAmazonS3Client added back prefixes incorrectly " + "in a non-recursive listing", preDeleteDelimited.getCommonPrefixes(), - postDeleteDelimited.getCommonPrefixes() - ); + postDeleteDelimited.getCommonPrefixes()); LOG.info("Executing Deep listing"); ListObjectsV2Result postDeleteUndelimited = listObjectsV2(fs, key, null); - assertListSizeEqual("InconsistentAmazonS3Client added back objects incorrectly " + - "in a recursive listing", - preDeleteUndelimited.getObjectSummaries(), - postDeleteUndelimited.getObjectSummaries() - ); + assertObjectSummariesEqual("InconsistentAmazonS3Client added back objects" + + " incorrectly in a recursive listing", + preDeleteUndelimited, postDeleteUndelimited, + stripTombstones); assertListSizeEqual("InconsistentAmazonS3Client added back prefixes incorrectly " + "in a recursive listing", @@ -582,6 +586,24 @@ public void testInconsistentS3ClientDeletes() throws Throwable { ); } + private void assertObjectSummariesEqual(final String message, + final ListObjectsV2Result expected, + final ListObjectsV2Result actual, + final boolean stripTombstones) { + assertCollectionsEqual( + message, + stringify(expected.getObjectSummaries(), stripTombstones), + stringify(actual.getObjectSummaries(), stripTombstones)); + } + + List stringify(List objects, + boolean stripTombstones) { + return objects.stream() + .filter(s -> !stripTombstones || !(s.getKey().endsWith("/"))) + .map(s -> s.getKey()) + .collect(Collectors.toList()); + } + /** * Require the v2 S3 list API. */ @@ -678,6 +700,22 @@ public void testListingReturnsVersionMetadata() throws Throwable { versionId, locatedFileStatus.getVersionId()); } + /** + * Assert that the two collections match using + * object equality of the elements within. + * @param message text for the assertion + * @param expected expected list + * @param actual actual list + * @param type of list + */ + private void assertCollectionsEqual(String message, + Collection expected, + Collection actual) { + Assertions.assertThat(actual) + .describedAs(message) + .containsExactlyInAnyOrderElementsOf(expected); + } + /** * Assert that the two list sizes match; failure message includes the lists. 
* @param message text for the assertion diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java index 70c62bf49caee..2d4173d1c2ad4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3GuardOutOfBandOperations.java @@ -56,7 +56,8 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile; import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH; -import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_METADATA_TTL; +import static org.apache.hadoop.fs.s3a.Constants.CHANGE_DETECT_MODE; +import static org.apache.hadoop.fs.s3a.Constants.CHANGE_DETECT_MODE_NONE; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_METADATA_TTL; import static org.apache.hadoop.fs.s3a.Constants.RETRY_INTERVAL; @@ -76,6 +77,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture; import static org.junit.Assume.assumeTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -168,12 +170,16 @@ protected Configuration createConfiguration() { RETRY_LIMIT, RETRY_INTERVAL, S3GUARD_CONSISTENCY_RETRY_INTERVAL, - S3GUARD_CONSISTENCY_RETRY_LIMIT); + S3GUARD_CONSISTENCY_RETRY_LIMIT, + CHANGE_DETECT_MODE, + METADATASTORE_METADATA_TTL); conf.setInt(RETRY_LIMIT, 3); conf.setInt(S3GUARD_CONSISTENCY_RETRY_LIMIT, 3); + conf.set(CHANGE_DETECT_MODE, CHANGE_DETECT_MODE_NONE); final String delay = "10ms"; conf.set(RETRY_INTERVAL, delay); conf.set(S3GUARD_CONSISTENCY_RETRY_INTERVAL, delay); + conf.set(METADATASTORE_METADATA_TTL, delay); return conf; } @@ -231,12 +237,13 @@ private S3AFileSystem createGuardedFS(boolean authoritativeMode) URI uri = testFS.getUri(); removeBaseAndBucketOverrides(uri.getHost(), config, + CHANGE_DETECT_MODE, METADATASTORE_AUTHORITATIVE, METADATASTORE_METADATA_TTL, AUTHORITATIVE_PATH); config.setBoolean(METADATASTORE_AUTHORITATIVE, authoritativeMode); config.setLong(METADATASTORE_METADATA_TTL, - DEFAULT_METADATASTORE_METADATA_TTL); + 5_000); final S3AFileSystem gFs = createFS(uri, config); // set back the same metadata store instance gFs.setMetadataStore(realMs); @@ -856,7 +863,7 @@ private void verifyFileStatusAsExpected(final String firstText, expectedLength, guardedLength); } else { assertEquals( - "File length in authoritative table with " + stats, + "File length in non-authoritative table with " + stats, expectedLength, guardedLength); } } @@ -964,6 +971,14 @@ public void testListingDelete() throws Exception { // Delete the file without S3Guard (raw) deleteFile(rawFS, testFilePath); + // now, versioned FS or not, it will not be readable from the + // raw FS, and this will fail in both open APIs during the open + // phase, rather than when a read is attempted. 
+ interceptFuture(FileNotFoundException.class, "", + rawFS.openFile(testFilePath).build()); + intercept(FileNotFoundException.class, () -> + rawFS.open(testFilePath).close()); + // File status will be still readable from s3guard S3AFileStatus status = (S3AFileStatus) guardedFs.getFileStatus(testFilePath); @@ -985,8 +1000,6 @@ public void testListingDelete() throws Exception { Assertions.assertThat(toChar(bytes)) .describedAs("open(%s)", testFilePath) .isEqualTo(text); - expectExceptionWhenReadingOpenFileAPI(rawFS, testFilePath, text, - null); } else { // unversioned sequence expectExceptionWhenReading(testFilePath, text); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java index e30269298111d..e2915884cefa3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java @@ -39,11 +39,14 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets; import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; +import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; import org.apache.hadoop.util.Progressable; -import static com.google.common.base.Preconditions.checkNotNull; -import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTrackerFactory; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; /** * Relays FS calls to the mocked FS, allows for some extra logging with @@ -83,8 +86,6 @@ public class MockS3AFileSystem extends S3AFileSystem { * mock FS. 
*/ private int logEvents = LOG_NAME; - private final S3AInstrumentation instrumentation = - new S3AInstrumentation(FS_URI); private Configuration conf; private WriteOperationHelper writeHelper; @@ -146,12 +147,12 @@ public Path qualify(final Path path) { public void initialize(URI name, Configuration originalConf) throws IOException { conf = originalConf; - writeHelper = new WriteOperationHelper(this, conf); + writeHelper = new WriteOperationHelper(this, conf, + new EmptyS3AStatisticsContext()); } @Override public void close() { - cleanupWithLogger(LOG, instrumentation); } @Override @@ -331,7 +332,7 @@ void deleteObjectAtPath(Path f, } @Override - void maybeCreateFakeParentDirectory(Path path) + protected void maybeCreateFakeParentDirectory(Path path) throws IOException, AmazonClientException { // no-op } @@ -359,12 +360,17 @@ public String toString() { } @Override - public S3AInstrumentation.CommitterStatistics newCommitterStatistics() { - return instrumentation.newCommitterStatistics(); + public CommitterStatistics newCommitterStatistics() { + return EmptyS3AStatisticsContext.EMPTY_COMMITTER_STATISTICS; } @Override public void operationRetried(Exception ex) { /** no-op */ } + + @Override + protected DurationTrackerFactory getDurationTrackerFactory() { + return stubDurationTrackerFactory(); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java index 4644cf24764ae..bd121ba2728eb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3ClientFactory.java @@ -23,7 +23,6 @@ import java.net.URI; import java.util.ArrayList; -import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.MultipartUploadListing; import com.amazonaws.services.s3.model.Region; @@ -35,11 +34,10 @@ public class MockS3ClientFactory implements S3ClientFactory { @Override - public AmazonS3 createS3Client(URI name, - final String bucket, - final AWSCredentialsProvider credentialSet, - final String userAgentSuffix) { + public AmazonS3 createS3Client(URI uri, + final S3ClientCreationParameters parameters) { AmazonS3 s3 = mock(AmazonS3.class); + String bucket = uri.getHost(); when(s3.doesBucketExist(bucket)).thenReturn(true); when(s3.doesBucketExistV2(bucket)).thenReturn(true); // this listing is used in startup if purging is enabled, so diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java index 8be3ff7dfda5b..861824277aca9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java @@ -88,7 +88,7 @@ public static void clearAnyUploads(S3AFileSystem fs, Path path) { while (uploads.hasNext()) { MultipartUpload upload = uploads.next(); fs.getWriteOperationHelper().abortMultipartUpload(upload.getKey(), - upload.getUploadId(), LOG_EVENT); + upload.getUploadId(), true, LOG_EVENT); LOG.debug("Cleaning up upload: {} {}", upload.getKey(), truncatedUploadId(upload.getUploadId())); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java index 118c9ee773a6b..c5670b09c3db5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java @@ -87,10 +87,15 @@ public interface S3ATestConstants { */ String KEY_CSVTEST_FILE = S3A_SCALE_TEST + "csvfile"; + /** + * The landsat bucket: {@value}. + */ + String LANDSAT_BUCKET = "s3a://landsat-pds/"; + /** * Default path for the multi MB test file: {@value}. */ - String DEFAULT_CSVTEST_FILE = "s3a://landsat-pds/scene_list.gz"; + String DEFAULT_CSVTEST_FILE = LANDSAT_BUCKET + "scene_list.gz"; /** * Name of the property to define the timeout for scale tests: {@value}. @@ -218,4 +223,10 @@ public interface S3ATestConstants { */ String S3GUARD_DDB_TEST_TABLE_NAME_KEY = "fs.s3a.s3guard.ddb.test.table"; + + /** + * Test option to enable audits of the method path after + * every test case. + */ + String DIRECTORY_MARKER_AUDIT = "fs.s3a.directory.marker.audit"; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index f75e37ecaaf37..e3e399ee7a804 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -35,16 +35,28 @@ import org.apache.hadoop.fs.s3a.auth.MarshalledCredentials; import org.apache.hadoop.fs.s3a.commit.CommitConstants; +import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; +import org.apache.hadoop.fs.s3a.impl.ContextAccessors; import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; import org.apache.hadoop.fs.s3a.s3guard.MetadataStoreCapabilities; +import org.apache.hadoop.fs.s3a.s3guard.S3Guard; +import org.apache.hadoop.fs.s3a.test.OperationTrackingStore; import org.apache.hadoop.fs.s3native.S3xLoginHelper; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.functional.CallableRaisingIOE; import com.amazonaws.auth.AWSCredentialsProvider; import org.hamcrest.core.Is; @@ -67,9 +79,10 @@ import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CREDENTIAL_PROVIDER_PATH; import static org.apache.commons.lang3.StringUtils.isNotEmpty; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -80,7 +93,6 @@ import static 
org.apache.hadoop.fs.s3a.S3AUtils.propagateBucketOptions; import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.apache.hadoop.test.LambdaTestUtils.intercept; -import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC_COMMITTER_ENABLED; import static org.junit.Assert.*; /** @@ -615,9 +627,14 @@ public static Configuration prepareTestConfiguration(final Configuration conf) { conf.set(HADOOP_TMP_DIR, tmpDir); } conf.set(BUFFER_DIR, tmpDir); - // add this so that even on tests where the FS is shared, - // the FS is always "magic" - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); + + // directory marker policy + String directoryRetention = getTestProperty( + conf, + DIRECTORY_MARKER_POLICY, + DEFAULT_DIRECTORY_MARKER_POLICY); + conf.set(DIRECTORY_MARKER_POLICY, directoryRetention); + return conf; } @@ -810,6 +827,16 @@ public static void removeBaseAndBucketOverrides( removeBaseAndBucketOverrides(getTestBucketName(conf), conf, options); } + /** + * Disable S3Guard from the test bucket in a configuration. + * @param conf configuration. + */ + public static void disableS3GuardInTestBucket(Configuration conf) { + removeBaseAndBucketOverrides(getTestBucketName(conf), conf, + S3_METADATA_STORE_IMPL, + DIRECTORY_MARKER_POLICY); + conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); + } /** * Call a function; any exception raised is logged at info. * This is for test teardowns. @@ -818,9 +845,9 @@ public static void removeBaseAndBucketOverrides( * @param type of operation. */ public static void callQuietly(final Logger log, - final Invoker.Operation operation) { + final CallableRaisingIOE operation) { try { - operation.execute(); + operation.apply(); } catch (Exception e) { log.info(e.toString(), e); } @@ -882,7 +909,51 @@ public static T terminateService(final T service) { public static S3AFileStatus getStatusWithEmptyDirFlag( final S3AFileSystem fs, final Path dir) throws IOException { - return fs.innerGetFileStatus(dir, true, StatusProbeEnum.ALL); + return fs.innerGetFileStatus(dir, true, + StatusProbeEnum.ALL); + } + + /** + * Create mock implementation of store context. + * @param multiDelete + * @param store + * @param accessors + * @return + * @throws URISyntaxException + * @throws IOException + */ + public static StoreContext createMockStoreContext( + boolean multiDelete, + OperationTrackingStore store, + ContextAccessors accessors) + throws URISyntaxException, IOException { + URI name = new URI("s3a://bucket"); + Configuration conf = new Configuration(); + return new StoreContextBuilder().setFsURI(name) + .setBucket("bucket") + .setConfiguration(conf) + .setUsername("alice") + .setOwner(UserGroupInformation.getCurrentUser()) + .setExecutor(BlockingThreadPoolExecutorService.newInstance( + 4, + 4, + 10, TimeUnit.SECONDS, + "s3a-transfer-shared")) + .setExecutorCapacity(DEFAULT_EXECUTOR_CAPACITY) + .setInvoker( + new Invoker(RetryPolicies.TRY_ONCE_THEN_FAIL, Invoker.LOG_EVENT)) + .setInstrumentation(new EmptyS3AStatisticsContext()) + .setStorageStatistics(new S3AStorageStatistics()) + .setInputPolicy(S3AInputPolicy.Normal) + .setChangeDetectionPolicy( + ChangeDetectionPolicy.createPolicy(ChangeDetectionPolicy.Mode.None, + ChangeDetectionPolicy.Source.ETag, false)) + .setMultiObjectDeleteEnabled(multiDelete) + .setMetadataStore(store) + .setUseListV1(false) + .setContextAccessors(accessors) + .setTimeProvider(new S3Guard.TtlTimeProvider(conf)) + .build(); } /** @@ -948,8 +1019,14 @@ public String toString() { * @param expected expected value. 
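(Illustrative sketch, not taken from this patch: the MetricDiff lifecycle these assertions support, assuming an unguarded S3AFileSystem fs and an existing plain file at path, where a getFileStatus() call is expected to issue exactly one HEAD request.)

    public void metricDiffSketch(S3AFileSystem fs, Path path)
        throws IOException {
      // the counter value is captured at construction time
      S3ATestUtils.MetricDiff headRequests = new S3ATestUtils.MetricDiff(
          fs, Statistic.OBJECT_METADATA_REQUESTS);
      fs.getFileStatus(path);          // the operation under measurement
      // assert on the delta since construction; reset() re-arms the diff
      headRequests.assertDiffEquals("HEAD requests in getFileStatus", 1);
      headRequests.reset();
    }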
*/ public void assertDiffEquals(String message, long expected) { - Assert.assertEquals(message + ": " + statistic.getSymbol(), - expected, diff()); + String text = message + ": " + statistic.getSymbol(); + long diff = diff(); + if (expected != diff) { + // Log in error ensures that the details appear in the test output + LOG.error(text + " expected {}, actual {}", expected, diff); + } + Assert.assertEquals(text, + expected, diff); } /** @@ -1162,7 +1239,7 @@ public static void assume(String message, boolean condition) { * @param out output stream * @return the (active) stats of the write */ - public static S3AInstrumentation.OutputStreamStatistics + public static BlockOutputStreamStatistics getOutputStreamStatistics(FSDataOutputStream out) { S3ABlockOutputStream blockOutputStream = (S3ABlockOutputStream) out.getWrappedStream(); @@ -1441,4 +1518,27 @@ public static Set getCurrentThreadNames() { .collect(Collectors.toCollection(TreeSet::new)); return threads; } + + /** + * Call the package-private {@code innerGetFileStatus()} method + * on the passed in FS. + * @param fs filesystem + * @param path path + * @param needEmptyDirectoryFlag look for empty directory + * @param probes file status probes to perform + * @return the status + * @throws IOException + */ + public static S3AFileStatus innerGetFileStatus( + S3AFileSystem fs, + Path path, + boolean needEmptyDirectoryFlag, + Set probes) throws IOException { + + return fs.innerGetFileStatus( + path, + needEmptyDirectoryFlag, + probes); + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/StorageStatisticsTracker.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/StorageStatisticsTracker.java index 7d17d699c0564..82f2b268899de 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/StorageStatisticsTracker.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/StorageStatisticsTracker.java @@ -22,7 +22,7 @@ import java.util.Iterator; import java.util.Map; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.StorageStatistics; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestListing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestListing.java index 1a533bfe64609..77ba31c3ce8ea 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestListing.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestListing.java @@ -18,9 +18,13 @@ package org.apache.hadoop.fs.s3a; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +import org.assertj.core.api.Assertions; import org.junit.Assert; import org.junit.Test; @@ -31,8 +35,11 @@ import java.util.NoSuchElementException; import java.util.Set; -import static org.apache.hadoop.fs.s3a.S3AUtils.ACCEPT_ALL; -import static org.apache.hadoop.fs.s3a.Listing.ProvidedFileStatusIterator; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; +import 
static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Place for the S3A listing classes; keeps all the small classes under control. @@ -40,11 +47,18 @@ public class TestListing extends AbstractS3AMockTest { private static class MockRemoteIterator implements - RemoteIterator { + RemoteIterator, IOStatisticsSource { + + private final IOStatisticsStore ioStatistics; + private Iterator iterator; MockRemoteIterator(Collection source) { iterator = source.iterator(); + this.ioStatistics = iostatisticsStore() + .withDurationTracking(OBJECT_LIST_REQUEST) + .build(); + ioStatistics.incrementCounter(OBJECT_LIST_REQUEST); } public boolean hasNext() { @@ -54,6 +68,11 @@ public boolean hasNext() { public S3AFileStatus next() { return iterator.next(); } + + @Override + public IOStatistics getIOStatistics() { + return ioStatistics; + } } private S3AFileStatus blankFileStatus(Path path) { @@ -65,11 +84,9 @@ public void testTombstoneReconcilingIterator() throws Exception { Path parent = new Path("/parent"); Path liveChild = new Path(parent, "/liveChild"); Path deletedChild = new Path(parent, "/deletedChild"); - Path[] allFiles = {parent, liveChild, deletedChild}; - Path[] liveFiles = {parent, liveChild}; - Listing listing = new Listing(fs); - Collection statuses = new ArrayList<>(); + Listing listing = fs.getListing(); + Collection statuses = new ArrayList<>(); statuses.add(blankFileStatus(parent)); statuses.add(blankFileStatus(liveChild)); statuses.add(blankFileStatus(deletedChild)); @@ -92,7 +109,13 @@ public void testTombstoneReconcilingIterator() throws Exception { while (reconcilingIterator.hasNext()) { actualPaths.add(reconcilingIterator.next().getPath()); } - Assert.assertTrue(actualPaths.equals(expectedPaths)); + Assertions.assertThat(actualPaths) + .describedAs("paths from iterator") + .isEqualTo(expectedPaths); + + // now verify the stats went all the way through. + IOStatistics iostats = extractStatistics(reconcilingIterator); + verifyStatisticCounterValue(iostats, OBJECT_LIST_REQUEST, 1); } @Test @@ -104,18 +127,13 @@ public void testProvidedFileStatusIteratorEnd() throws Exception { S3AFileStatus[] statuses = { s3aStatus }; - ProvidedFileStatusIterator it = new ProvidedFileStatusIterator(statuses, - ACCEPT_ALL, new Listing.AcceptAllButS3nDirs()); + RemoteIterator it = Listing.toProvidedFileStatusIterator( + statuses); Assert.assertTrue("hasNext() should return true first time", it.hasNext()); - Assert.assertNotNull("first element should not be null", it.next()); + Assert.assertEquals("first element from iterator", + s3aStatus, it.next()); Assert.assertFalse("hasNext() should now be false", it.hasNext()); - try { - it.next(); - Assert.fail("next() should have thrown exception"); - } catch (NoSuchElementException e) { - // Correct behavior. Any other exceptions are propagated as failure. 
- return; - } + intercept(NoSuchElementException.class, it::next); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java index 3822ee781dcc8..08d5e0db0d2f0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java @@ -30,7 +30,7 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.EnvironmentVariableCredentialsProvider; import com.amazonaws.auth.InstanceProfileCredentialsProvider; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java index ff176f58da67d..baa4a542c855a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java @@ -18,18 +18,23 @@ package org.apache.hadoop.fs.s3a; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.commit.PutTracker; +import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.util.Progressable; import org.junit.Before; import org.junit.Test; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.concurrent.ExecutorService; -import static org.junit.Assert.*; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; /** * Unit tests for {@link S3ABlockOutputStream}. 
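(Illustrative sketch, not taken from this patch: the LambdaTestUtils.intercept() idiom that replaces the try/fail/catch blocks in these tests. The lambda must raise the named exception type; a normal return or a different exception fails the test, and the caught exception is returned for follow-up assertions. The path below is hypothetical.)

    // assumes static imports of LambdaTestUtils.intercept
    // and org.junit.Assert.assertNotNull
    public void interceptSketch(FileSystem fs) throws Exception {
      FileNotFoundException fnfe = intercept(FileNotFoundException.class,
          () -> fs.getFileStatus(new Path("/no/such/path")));
      assertNotNull("expected exception returned for further checks", fnfe);
    }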
@@ -45,11 +50,10 @@ public void setUp() throws Exception { S3ADataBlocks.BlockFactory blockFactory = mock(S3ADataBlocks.BlockFactory.class); long blockSize = Constants.DEFAULT_MULTIPART_SIZE; - S3AInstrumentation.OutputStreamStatistics statistics = null; WriteOperationHelper oHelper = mock(WriteOperationHelper.class); PutTracker putTracker = mock(PutTracker.class); stream = spy(new S3ABlockOutputStream(fs, "", executorService, - progressable, blockSize, blockFactory, statistics, oHelper, + progressable, blockSize, blockFactory, null, oHelper, putTracker)); } @@ -57,10 +61,51 @@ public void setUp() throws Exception { public void testFlushNoOpWhenStreamClosed() throws Exception { doThrow(new IOException()).when(stream).checkOpen(); - try { - stream.flush(); - } catch (Exception e){ - fail("Should not have any exception."); - } + stream.flush(); + } + + @Test + public void testWriteOperationHelperPartLimits() throws Throwable { + S3AFileSystem s3a = mock(S3AFileSystem.class); + when(s3a.getBucket()).thenReturn("bucket"); + WriteOperationHelper woh = new WriteOperationHelper(s3a, + new Configuration(), + new EmptyS3AStatisticsContext()); + ByteArrayInputStream inputStream = new ByteArrayInputStream( + "a".getBytes()); + // first one works + String key = "destKey"; + woh.newUploadPartRequest(key, + "uploadId", 1, 1024, inputStream, null, 0L); + // but ask past the limit and a PathIOE is raised + intercept(PathIOException.class, key, + () -> woh.newUploadPartRequest(key, + "uploadId", 50000, 1024, inputStream, null, 0L)); + } + + static class StreamClosedException extends IOException {} + + @Test + public void testStreamClosedAfterAbort() throws Exception { + stream.abort(); + + // This verification replaces testing various operations after calling + // abort: after calling abort, stream is closed like calling close(). + intercept(IOException.class, () -> stream.checkOpen()); + + // check that calling write() will call checkOpen() and throws exception + doThrow(new StreamClosedException()).when(stream).checkOpen(); + + intercept(StreamClosedException.class, + () -> stream.write(new byte[] {'a', 'b', 'c'})); + } + + @Test + public void testCallingCloseAfterCallingAbort() throws Exception { + stream.abort(); + + // This shouldn't throw IOException like calling close() multiple times. + // This will ensure abort() can be called with try-with-resource. 
+ stream.close(); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AGetFileStatus.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AGetFileStatus.java index e90518a9cbd0f..34a275b580f25 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AGetFileStatus.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AGetFileStatus.java @@ -76,11 +76,15 @@ public void testFakeDirectory() throws Exception { String key = path.toUri().getPath().substring(1); when(s3.getObjectMetadata(argThat(correctGetMetadataRequest(BUCKET, key)))) .thenThrow(NOT_FOUND); - ObjectMetadata meta = new ObjectMetadata(); - meta.setContentLength(0L); - when(s3.getObjectMetadata(argThat( - correctGetMetadataRequest(BUCKET, key + "/")) - )).thenReturn(meta); + String keyDir = key + "/"; + ListObjectsV2Result listResult = new ListObjectsV2Result(); + S3ObjectSummary objectSummary = new S3ObjectSummary(); + objectSummary.setKey(keyDir); + objectSummary.setSize(0L); + listResult.getObjectSummaries().add(objectSummary); + when(s3.listObjectsV2(argThat( + matchListV2Request(BUCKET, keyDir)) + )).thenReturn(listResult); FileStatus stat = fs.getFileStatus(path); assertNotNull(stat); assertEquals(fs.makeQualified(path), stat.getPath()); @@ -161,4 +165,14 @@ private ArgumentMatcher correctGetMetadataRequest( && request.getBucketName().equals(bucket) && request.getKey().equals(key); } + + private ArgumentMatcher matchListV2Request( + String bucket, String key) { + return (ListObjectsV2Request request) -> { + return request != null + && request.getBucketName().equals(bucket) + && request.getPrefix().equals(key); + }; + } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java index 3d7cdfc08dec4..4d3930fbc3c1d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java @@ -18,8 +18,6 @@ package org.apache.hadoop.fs.s3a; -import java.util.concurrent.atomic.AtomicLong; - import com.amazonaws.AmazonServiceException; import com.amazonaws.SdkBaseException; import com.amazonaws.services.s3.Headers; @@ -36,6 +34,7 @@ import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; import org.apache.hadoop.fs.s3a.impl.ChangeTracker; +import org.apache.hadoop.fs.s3a.statistics.impl.CountingChangeTracker; import org.apache.hadoop.test.HadoopTestBase; import static org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy.CHANGE_DETECTED; @@ -359,7 +358,7 @@ protected void assertTrackerMismatchCount( final ChangeTracker tracker, final int expectedCount) { assertEquals("counter in tracker " + tracker, - expectedCount, tracker.getVersionMismatches().get()); + expectedCount, tracker.getVersionMismatches()); } /** @@ -386,7 +385,7 @@ protected ChangeTracker newTracker(final ChangeDetectionPolicy.Mode mode, source, requireVersion); ChangeTracker tracker = new ChangeTracker(URI, policy, - new AtomicLong(0), objectAttributes); + new CountingChangeTracker(), objectAttributes); if (objectAttributes.getVersionId() == null && objectAttributes.getETag() == null) { assertFalse("Tracker should not have applied constraints " + tracker, diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestWildflyAndOpenSSLBinding.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestWildflyAndOpenSSLBinding.java new file mode 100644 index 0000000000000..a2b013f468a79 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestWildflyAndOpenSSLBinding.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a; + +import java.io.IOException; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import org.junit.Before; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.s3a.Constants.SSL_CHANNEL_MODE; +import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.bindSSLChannelMode; +import static org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory.SSLChannelMode.Default; +import static org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE; +import static org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE_with_GCM; +import static org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory.SSLChannelMode.OpenSSL; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; + +/** + * Make sure that wildfly is not on this classpath and that we can still + * create connections in the default option, but that openssl fails. + * This test suite is designed to work whether or not wildfly JAR is on + * the classpath, and when openssl native libraries are/are not + * on the path. + * Some of the tests are skipped in a maven build because wildfly + * is always on the classpath -but they are retained as in-IDE + * runs may be different, and if wildfly is removed from + * the compile or test CP then different test cases will execute. + */ +public class TestWildflyAndOpenSSLBinding extends AbstractHadoopTestBase { + + /** Was wildfly found. 
*/ + private boolean hasWildfly; + + @Before + public void setup() throws Exception { + // determine whether or not wildfly is on the classpath + ClassLoader loader = this.getClass().getClassLoader(); + try { + loader.loadClass("org.wildfly.openssl.OpenSSLProvider"); + hasWildfly = true; + } catch (ClassNotFoundException e) { + hasWildfly = false; + } + } + + + @Test + public void testUnknownMode() throws Throwable { + DelegatingSSLSocketFactory.resetDefaultFactory(); + Configuration conf = new Configuration(false); + conf.set(SSL_CHANNEL_MODE, "no-such-mode "); + intercept(IllegalArgumentException.class, () -> + bindSSLChannelMode(conf, new ClientConfiguration())); + } + + @Test + public void testOpenSSLNoWildfly() throws Throwable { + assumeThat(hasWildfly).isFalse(); + intercept(NoClassDefFoundError.class, "wildfly", () -> + bindSocketFactory(OpenSSL)); + } + + /** + * If there is no WF on the CP, then we always downgrade + * to default. + */ + @Test + public void testDefaultDowngradesNoWildfly() throws Throwable { + assumeThat(hasWildfly).isFalse(); + expectBound(Default, Default_JSSE); + } + + /** + * Wildfly is on the CP; if openssl native is on the + * path then openssl will load, otherwise JSSE. + */ + @Test + public void testWildflyOpenSSL() throws Throwable { + assumeThat(hasWildfly).isTrue(); + assertThat(bindSocketFactory(Default)) + .describedAs("Sockets from mode " + Default) + .isIn(OpenSSL, Default_JSSE); + } + + @Test + public void testJSSE() throws Throwable { + expectBound(Default_JSSE, Default_JSSE); + } + + @Test + public void testGCM() throws Throwable { + expectBound(Default_JSSE_with_GCM, Default_JSSE_with_GCM); + } + + /** + * Bind to a socket mode and verify that the result matches + * that expected -which does not have to be the one requested. + * @param channelMode mode to use + * @param finalMode mode to test for + */ + private void expectBound( + DelegatingSSLSocketFactory.SSLChannelMode channelMode, + DelegatingSSLSocketFactory.SSLChannelMode finalMode) + throws Throwable { + assertThat(bindSocketFactory(channelMode)) + .describedAs("Channel mode of socket factory created with mode %s", + channelMode) + .isEqualTo(finalMode); + } + + /** + * Bind the socket factory to a given channel mode. + * @param channelMode mode to use + * @return the actual channel mode. 
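(Illustrative sketch, not taken from this patch: how a client configuration would pin the channel mode this suite probes; Default_JSSE is assumed here as the fallback that avoids the wildfly/openssl path altogether.)

    // assumes the Constants.SSL_CHANNEL_MODE and NetworkBinding imports
    // already used by this test class
    Configuration conf = new Configuration(false);
    conf.set(SSL_CHANNEL_MODE,
        DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE.name());
    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setProtocol(Protocol.HTTPS);
    bindSSLChannelMode(conf, awsConf);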
+ */ + private DelegatingSSLSocketFactory.SSLChannelMode bindSocketFactory( + final DelegatingSSLSocketFactory.SSLChannelMode channelMode) + throws IOException { + DelegatingSSLSocketFactory.resetDefaultFactory(); + Configuration conf = new Configuration(false); + conf.set(SSL_CHANNEL_MODE, channelMode.name()); + ClientConfiguration awsConf = new ClientConfiguration(); + awsConf.setProtocol(Protocol.HTTPS); + bindSSLChannelMode(conf, awsConf); + return DelegatingSSLSocketFactory.getDefaultFactory().getChannelMode(); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java index cf935d28591ba..814292c45d83b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java @@ -51,6 +51,7 @@ import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool; +import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; import static org.apache.hadoop.fs.s3a.Constants.*; @@ -62,6 +63,7 @@ import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.forbidden; import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.newAssumedRoleConfig; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.exec; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.apache.hadoop.test.LambdaTestUtils.*; @@ -392,11 +394,17 @@ public void testAssumeRoleRestrictedPolicyFS() throws Exception { // when S3Guard is enabled, the restricted policy still // permits S3Guard record lookup, so getFileStatus calls // will work iff the record is in the database. + // probe the store using a path other than /, so a HEAD + // request is issued. 
forbidden("getFileStatus", - () -> fs.getFileStatus(ROOT)); + () -> fs.getFileStatus(methodPath())); } forbidden("", () -> fs.listStatus(ROOT)); + forbidden("", + () -> fs.listFiles(ROOT, true)); + forbidden("", + () -> fs.listLocatedStatus(ROOT)); forbidden("", () -> fs.mkdirs(path("testAssumeRoleFS"))); } @@ -547,7 +555,6 @@ public void testAssumedRoleRetryHandler() throws Throwable { public void testRestrictedCommitActions() throws Throwable { describe("Attempt commit operations against a path with restricted rights"); Configuration conf = createAssumedRoleConfig(); - conf.setBoolean(CommitConstants.MAGIC_COMMITTER_ENABLED, true); final int uploadPartSize = 5 * 1024 * 1024; ProgressCounter progress = new ProgressCounter(); @@ -569,8 +576,11 @@ public void testRestrictedCommitActions() throws Throwable { .addResources(directory(writeableDir)) ); roleFS = (S3AFileSystem) writeableDir.getFileSystem(conf); - CommitOperations fullOperations = new CommitOperations(fs); - CommitOperations operations = new CommitOperations(roleFS); + CommitterStatistics committerStatistics = fs.newCommitterStatistics(); + CommitOperations fullOperations = new CommitOperations(fs, + committerStatistics); + CommitOperations operations = new CommitOperations(roleFS, + committerStatistics); File localSrc = File.createTempFile("source", ""); writeCSVData(localSrc); @@ -604,7 +614,7 @@ public void testRestrictedCommitActions() throws Throwable { name + CommitConstants.PENDING_SUFFIX), true); assertTrue(src.delete()); })); - progress.assertCount("Process counter is not expected", + progress.assertCount("progress counter is not expected", range); try { @@ -648,6 +658,8 @@ public void testRestrictedCommitActions() throws Throwable { } finally { LOG.info("Cleanup"); fullOperations.abortPendingUploadsUnderPath(readOnlyDir); + LOG.info("Committer statistics {}", + ioStatisticsSourceToString(committerStatistics)); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java index 2e13deb81c11c..72af1752b1253 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java @@ -23,12 +23,11 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import com.amazonaws.SignableRequest; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.Signer; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.AWSS3V4Signer; import org.assertj.core.api.Assertions; import org.junit.Test; @@ -40,14 +39,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; -import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider; +import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.auth.ITestCustomSigner.CustomSignerInitializer.StoreValue; import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider; import org.apache.hadoop.security.UserGroupInformation; import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS; import static org.apache.hadoop.fs.s3a.Constants.SIGNING_ALGORITHM_S3; -import static 
org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; /** * Tests for custom Signers and SignerInitializers. @@ -62,23 +62,32 @@ public class ITestCustomSigner extends AbstractS3ATestBase { private String regionName; + private String endpoint; + @Override public void setup() throws Exception { super.setup(); - regionName = determineRegion(getFileSystem().getBucket()); + final S3AFileSystem fs = getFileSystem(); + regionName = determineRegion(fs.getBucket()); LOG.info("Determined region name to be [{}] for bucket [{}]", regionName, - getFileSystem().getBucket()); + fs.getBucket()); + endpoint = fs.getConf() + .get(Constants.ENDPOINT, Constants.CENTRAL_ENDPOINT); + LOG.info("Test endpoint is {}", endpoint); } @Test public void testCustomSignerAndInitializer() throws IOException, InterruptedException { + final Path basePath = path(getMethodName()); UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser("user1"); - FileSystem fs1 = runMkDirAndVerify(ugi1, "/customsignerpath1", "id1"); + FileSystem fs1 = runMkDirAndVerify(ugi1, + new Path(basePath, "customsignerpath1"), "id1"); UserGroupInformation ugi2 = UserGroupInformation.createRemoteUser("user2"); - FileSystem fs2 = runMkDirAndVerify(ugi2, "/customsignerpath2", "id2"); + FileSystem fs2 = runMkDirAndVerify(ugi2, + new Path(basePath, "customsignerpath2"), "id2"); Assertions.assertThat(CustomSignerInitializer.knownStores.size()) .as("Num registered stores mismatch").isEqualTo(2); @@ -91,20 +100,19 @@ public void testCustomSignerAndInitializer() } private FileSystem runMkDirAndVerify(UserGroupInformation ugi, - String pathString, String identifier) + Path finalPath, String identifier) throws IOException, InterruptedException { Configuration conf = createTestConfig(identifier); - Path path = new Path(pathString); - path = path.makeQualified(getFileSystem().getUri(), - getFileSystem().getWorkingDirectory()); - - Path finalPath = path; return ugi.doAs((PrivilegedExceptionAction) () -> { - int invocationCount = CustomSigner.invocationCount; + int instantiationCount = CustomSigner.getInstantiationCount(); + int invocationCount = CustomSigner.getInvocationCount(); FileSystem fs = finalPath.getFileSystem(conf); fs.mkdirs(finalPath); - Assertions.assertThat(CustomSigner.invocationCount) - .as("Invocation count lower than expected") + Assertions.assertThat(CustomSigner.getInstantiationCount()) + .as("CustomSigner Instantiation count lower than expected") + .isGreaterThan(instantiationCount); + Assertions.assertThat(CustomSigner.getInvocationCount()) + .as("CustomSigner Invocation count lower than expected") .isGreaterThan(invocationCount); Assertions.assertThat(CustomSigner.lastStoreValue) @@ -118,6 +126,12 @@ private FileSystem runMkDirAndVerify(UserGroupInformation ugi, }); } + /** + * Create a test conf with the custom signer; fixes up + * endpoint to be that of the test FS. + * @param identifier test key. + * @return a configuration for a filesystem. + */ private Configuration createTestConfig(String identifier) { Configuration conf = createConfiguration(); @@ -128,28 +142,38 @@ private Configuration createTestConfig(String identifier) { conf.set(TEST_ID_KEY, identifier); conf.set(TEST_REGION_KEY, regionName); + conf.set(Constants.ENDPOINT, endpoint); + // make absolutely sure there is no caching. 
+ disableFilesystemCaching(conf); return conf; } private String determineRegion(String bucketName) throws IOException { - AmazonS3 s3 = AmazonS3ClientBuilder.standard().withCredentials( - new SimpleAWSCredentialsProvider(null, createConfiguration())) - .withForceGlobalBucketAccessEnabled(true).withRegion("us-east-1") - .build(); - String region = s3.getBucketLocation(bucketName); - return fixBucketRegion(region); + return getFileSystem().getBucketLocation(bucketName); } @Private public static final class CustomSigner implements Signer { - private static int invocationCount = 0; + + private static final AtomicInteger INSTANTIATION_COUNT = + new AtomicInteger(0); + private static final AtomicInteger INVOCATION_COUNT = + new AtomicInteger(0); + private static StoreValue lastStoreValue; + public CustomSigner() { + int c = INSTANTIATION_COUNT.incrementAndGet(); + LOG.info("Creating Signer #{}", c); + } + @Override public void sign(SignableRequest request, AWSCredentials credentials) { - invocationCount++; + int c = INVOCATION_COUNT.incrementAndGet(); + LOG.info("Signing request #{}", c); + String host = request.getEndpoint().getHost(); String bucketName = host.split("\\.")[0]; try { @@ -163,6 +187,14 @@ public void sign(SignableRequest request, AWSCredentials credentials) { realSigner.setRegionName(lastStoreValue.conf.get(TEST_REGION_KEY)); realSigner.sign(request, credentials); } + + public static int getInstantiationCount() { + return INSTANTIATION_COUNT.get(); + } + + public static int getInvocationCount() { + return INVOCATION_COUNT.get(); + } } @Private diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java index a8e7a57057605..402469eb3b736 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java @@ -410,8 +410,7 @@ public void checkBasicFileOperations() throws Throwable { // this is HEAD + "/" on S3; get on S3Guard auth when the path exists, - accessDeniedIf(!s3guard, () -> - readonlyFS.listStatus(emptyDir)); + readonlyFS.listStatus(emptyDir); // a recursive list of the no-read-directory works because // there is no directory marker, it becomes a LIST call. @@ -421,12 +420,7 @@ public void checkBasicFileOperations() throws Throwable { // and so working. readonlyFS.getFileStatus(noReadDir); - // empty dir checks work when guarded because even in non-auth mode - // there are no checks for directories being out of date - // without S3, the HEAD path + "/" is blocked - accessDeniedIf(!s3guard, () -> - readonlyFS.getFileStatus(emptyDir)); - + readonlyFS.getFileStatus(emptyDir); // now look at a file; the outcome depends on the mode. accessDeniedIf(!guardedInAuthMode, () -> readonlyFS.getFileStatus(subdirFile)); @@ -464,14 +458,14 @@ public void checkGlobOperations() throws Throwable { // a file fails if not in auth mode globFS(readonlyFS, subdirFile, null, !guardedInAuthMode, 1); // empty directories don't fail. 
- FileStatus[] st = globFS(readonlyFS, emptyDir, null, !s3guard, 1); + FileStatus[] st = globFS(readonlyFS, emptyDir, null, false, 1); if (s3guard) { assertStatusPathEquals(emptyDir, st); } st = globFS(readonlyFS, noReadWildcard, - null, !s3guard, 2); + null, false, 2); if (s3guard) { Assertions.assertThat(st) .extracting(FileStatus::getPath) @@ -481,12 +475,12 @@ public void checkGlobOperations() throws Throwable { // there is precisely one .docx file (subdir2File2.docx) globFS(readonlyFS, new Path(noReadDir, "*/*.docx"), - null, !s3guard, 1); + null, false, 1); // there are no .doc files. globFS(readonlyFS, new Path(noReadDir, "*/*.doc"), - null, !s3guard, 0); + null, false, 0); globFS(readonlyFS, noReadDir, EVERYTHING, false, 1); // and a filter without any wildcarded pattern only finds @@ -513,17 +507,14 @@ public void checkSingleThreadedLocatedFileStatus() throws Throwable { true, HIDDEN_FILE_FILTER, true); - accessDeniedIf(!s3guard, - () -> fetcher.getFileStatuses()) - .ifPresent(stats -> { - Assertions.assertThat(stats) - .describedAs("result of located scan").flatExtracting(FileStatus::getPath) - .containsExactlyInAnyOrder( - emptyFile, - subdirFile, - subdir2File1, - subdir2File2); - }); + Assertions.assertThat(fetcher.getFileStatuses()) + .describedAs("result of located scan") + .flatExtracting(FileStatus::getPath) + .containsExactlyInAnyOrder( + emptyFile, + subdirFile, + subdir2File1, + subdir2File2); } /** @@ -542,15 +533,11 @@ public void checkLocatedFileStatusFourThreads() throws Throwable { true, EVERYTHING, true); - accessDeniedIf(!s3guard, - () -> fetcher.getFileStatuses()) - .ifPresent(stats -> { - Assertions.assertThat(stats) - .describedAs("result of located scan") - .isNotNull() - .flatExtracting(FileStatus::getPath) - .containsExactlyInAnyOrder(subdirFile, subdir2File1); - }); + Assertions.assertThat(fetcher.getFileStatuses()) + .describedAs("result of located scan") + .isNotNull() + .flatExtracting(FileStatus::getPath) + .containsExactlyInAnyOrder(subdirFile, subdir2File1); } /** @@ -631,19 +618,16 @@ public void checkLocatedFileStatusNonexistentPath() throws Throwable { */ public void checkDeleteOperations() throws Throwable { describe("Testing delete operations"); - + readonlyFS.delete(emptyDir, true); if (!authMode) { - // unguarded or non-auth S3Guard to fail on HEAD + / - accessDenied(() -> readonlyFS.delete(emptyDir, true)); // to fail on HEAD accessDenied(() -> readonlyFS.delete(emptyFile, true)); } else { - // auth mode checks DDB for status and then issues the DELETE - readonlyFS.delete(emptyDir, true); + // checks DDB for status and then issues the DELETE readonlyFS.delete(emptyFile, true); } - // this will succeed for both as there is no subdir marker. 
+ // this will succeed for both readonlyFS.delete(subDir, true); // after which it is not there fileNotFound(() -> readonlyFS.getFileStatus(subDir)); @@ -740,7 +724,7 @@ protected FileStatus[] globFS( S3ATestUtils.MetricDiff getMetric = new S3ATestUtils.MetricDiff(fs, Statistic.OBJECT_METADATA_REQUESTS); S3ATestUtils.MetricDiff listMetric = new S3ATestUtils.MetricDiff(fs, - Statistic.OBJECT_LIST_REQUESTS); + Statistic.OBJECT_LIST_REQUEST); FileStatus[] st; try { st = filter == null diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ProgressCounter.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ProgressCounter.java index 15a57152091b7..362e674e13a54 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ProgressCounter.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ProgressCounter.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.s3a.auth; +import java.util.concurrent.atomic.AtomicLong; + import org.apache.hadoop.util.Progressable; import static org.junit.Assert.assertEquals; @@ -27,14 +29,14 @@ */ public class ProgressCounter implements Progressable { - private long count; + private final AtomicLong count = new AtomicLong(); public void progress() { - count++; + count.incrementAndGet(); } public long getCount() { - return count; + return count.get(); } public void assertCount(String message, int expected) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java index 7b3912bf61cd5..c3030aa227d2c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java @@ -27,7 +27,7 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java index f6c908ea2f7e6..26655de9d4417 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFileystem.java @@ -43,7 +43,9 @@ import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.S3ClientFactory; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext; import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.security.TokenCache; @@ -305,6 +307,9 @@ public void testDelegatedFileSystem() throws Throwable { describe("Delegation tokens can be passed to a new filesystem;" + " if role restricted, permissions are tightened."); S3AFileSystem 
fs = getFileSystem(); + // force a probe of the remote FS to make sure its endpoint is valid + // (this always hits S3, even when S3Guard is enabled) + fs.getObjectMetadata(new Path("/")); readLandsatMetadata(fs); URI uri = fs.getUri(); @@ -551,18 +556,22 @@ public void testDelegationBindingMismatch2() throws Throwable { */ protected ObjectMetadata readLandsatMetadata(final S3AFileSystem delegatedFS) throws Exception { - AWSCredentialProviderList testing + AWSCredentialProviderList testingCreds = delegatedFS.shareCredentials("testing"); URI landsat = new URI(DEFAULT_CSVTEST_FILE); DefaultS3ClientFactory factory = new DefaultS3ClientFactory(); - Configuration conf = new Configuration(delegatedFS.getConf()); - conf.set(ENDPOINT, ""); - factory.setConf(conf); + factory.setConf(new Configuration(delegatedFS.getConf())); String host = landsat.getHost(); - AmazonS3 s3 = factory.createS3Client(landsat, host, testing, - "ITestSessionDelegationInFileystem"); + S3ClientFactory.S3ClientCreationParameters parameters = null; + parameters = new S3ClientFactory.S3ClientCreationParameters() + .withCredentialSet(testingCreds) + .withEndpoint(DEFAULT_ENDPOINT) + .withMetrics(new EmptyS3AStatisticsContext() + .newStatisticsFromAwsSdk()) + .withUserAgentSuffix("ITestSessionDelegationInFileystem"); + AmazonS3 s3 = factory.createS3Client(landsat, parameters); return Invoker.once("HEAD", host, () -> s3.getObjectMetadata(host, landsat.getPath().substring(1))); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/MiniKerberizedHadoopCluster.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/MiniKerberizedHadoopCluster.java index c42372a4b3087..0a266dc3eaa6d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/MiniKerberizedHadoopCluster.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/MiniKerberizedHadoopCluster.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.Properties; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/TestS3ADelegationTokenSupport.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/TestS3ADelegationTokenSupport.java index 516022e7e5875..88d9ebfcdfdc3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/TestS3ADelegationTokenSupport.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/TestS3ADelegationTokenSupport.java @@ -38,6 +38,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; /** * Unit tests related to S3A DT support. 
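The readLandsatMetadata() rewrite above replaces the positional createS3Client(uri, host, credentials, userAgent) call with a parameter object. A minimal sketch of that calling pattern, limited to the builder methods that actually appear in this patch; the fs, credentials and uri variables and the user-agent suffix are assumed placeholders, not part of the change:

    // illustrative sketch only, not part of the patch
    DefaultS3ClientFactory factory = new DefaultS3ClientFactory();
    factory.setConf(new Configuration(fs.getConf()));
    S3ClientFactory.S3ClientCreationParameters parameters =
        new S3ClientFactory.S3ClientCreationParameters()
            .withCredentialSet(credentials)         // shared AWSCredentialProviderList
            .withEndpoint(DEFAULT_ENDPOINT)         // explicit endpoint instead of clearing ENDPOINT
            .withMetrics(new EmptyS3AStatisticsContext()
                .newStatisticsFromAwsSdk())         // no-op SDK statistics for tests
            .withUserAgentSuffix("my-test-suite");  // assumed suffix, illustrative only
    AmazonS3 s3 = factory.createS3Client(uri, parameters);

Grouping the options in one builder keeps the factory signature stable as further settings (metrics, endpoint, user agent) are added.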
@@ -58,6 +59,14 @@ public void testSessionTokenKind() throws Throwable { assertEquals(SESSION_TOKEN_KIND, identifier.getKind()); } + @Test + public void testSessionTokenIssueDate() throws Throwable { + AbstractS3ATokenIdentifier identifier + = new SessionTokenIdentifier(); + assertEquals(SESSION_TOKEN_KIND, identifier.getKind()); + assertTrue("issue date is not set", identifier.getIssueDate() > 0L); + } + @Test public void testSessionTokenDecode() throws Throwable { Text alice = new Text("alice"); @@ -90,6 +99,8 @@ public void testSessionTokenDecode() throws Throwable { UserGroupInformation.AuthenticationMethod.TOKEN, decodedUser.getAuthenticationMethod()); assertEquals("origin", decoded.getOrigin()); + assertEquals("issue date", identifier.getIssueDate(), + decoded.getIssueDate()); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index 1cf3fb4a3f65f..1df4f9b9cc15c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -53,6 +53,7 @@ import static org.apache.hadoop.fs.s3a.MultipartTestUtils.listMultipartUploads; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; /** * Base test suite for committer operations. @@ -116,7 +117,7 @@ protected Configuration createConfiguration() { FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES, FAST_UPLOAD_BUFFER); - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); + conf.setBoolean(MAGIC_COMMITTER_ENABLED, DEFAULT_MAGIC_COMMITTER_ENABLED); conf.setLong(MIN_MULTIPART_THRESHOLD, MULTIPART_MIN_SIZE); conf.setInt(MULTIPART_SIZE, MULTIPART_MIN_SIZE); conf.set(FAST_UPLOAD_BUFFER, FAST_UPLOAD_BUFFER_ARRAY); @@ -359,11 +360,13 @@ private String pathToPrefix(Path path) { * and that it can be loaded. * The contents will be logged and returned. * @param dir directory to scan + * @param jobId job ID, only verified if non-empty * @return the loaded success data * @throws IOException IO Failure */ - protected SuccessData verifySuccessMarker(Path dir) throws IOException { - return validateSuccessFile(dir, "", getFileSystem(), "query", 0); + protected SuccessData verifySuccessMarker(Path dir, String jobId) + throws IOException { + return validateSuccessFile(dir, "", getFileSystem(), "query", 0, jobId); } /** @@ -442,6 +445,7 @@ public static TaskAttemptContext taskAttemptForJob(JobId jobId, * @param fs filesystem * @param origin origin (e.g. 
"teragen" for messages) * @param minimumFileCount minimum number of files to have been created + * @param jobId job ID, only verified if non-empty * @return the success data * @throws IOException IO failure */ @@ -449,13 +453,16 @@ public static SuccessData validateSuccessFile(final Path outputPath, final String committerName, final S3AFileSystem fs, final String origin, - final int minimumFileCount) throws IOException { + final int minimumFileCount, + final String jobId) throws IOException { SuccessData successData = loadSuccessFile(fs, outputPath, origin); String commitDetails = successData.toString(); LOG.info("Committer name " + committerName + "\n{}", commitDetails); LOG.info("Committer statistics: \n{}", successData.dumpMetrics(" ", " = ", "\n")); + LOG.info("Job IOStatistics: \n{}", + ioStatisticsToString(successData.getIOStatistics())); LOG.info("Diagnostics\n{}", successData.dumpDiagnostics(" ", " = ", "\n")); if (!committerName.isEmpty()) { @@ -463,8 +470,13 @@ public static SuccessData validateSuccessFile(final Path outputPath, committerName, successData.getCommitter()); } Assertions.assertThat(successData.getFilenames()) - .describedAs("Files committed") + .describedAs("Files committed in " + commitDetails) .hasSizeGreaterThanOrEqualTo(minimumFileCount); + if (StringUtils.isNotEmpty(jobId)) { + Assertions.assertThat(successData.getJobId()) + .describedAs("JobID in " + commitDetails) + .isEqualTo(jobId); + } return successData; } @@ -477,7 +489,7 @@ public static SuccessData validateSuccessFile(final Path outputPath, * @throws IOException failure to find/load the file * @throws AssertionError file is 0-bytes long, */ - public static SuccessData loadSuccessFile(final S3AFileSystem fs, + public static SuccessData loadSuccessFile(final FileSystem fs, final Path outputPath, final String origin) throws IOException { ContractTestUtils.assertPathExists(fs, "Output directory " + outputPath @@ -496,7 +508,9 @@ public static SuccessData loadSuccessFile(final S3AFileSystem fs, + success + " from " + origin + "; an S3A committer was not used", status.getLen() > 0); - LOG.info("Loading committer success file {}", success); + String body = ContractTestUtils.readUTF8(fs, success, -1); + LOG.info("Loading committer success file {}. 
Actual contents=\n{}", success, + body); return SuccessData.load(fs, success); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java index cacd54d12e90c..14207e8359788 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -40,6 +41,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.commit.files.SuccessData; +import org.apache.hadoop.fs.s3a.commit.magic.MagicS3GuardCommitter; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.MapFile; @@ -68,7 +72,15 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.*; import static org.apache.hadoop.fs.s3a.S3AUtils.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter.E_SELF_GENERATED_JOB_UUID; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.E_NO_SPARK_UUID; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.FS_S3A_COMMITTER_UUID; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.FS_S3A_COMMITTER_UUID_SOURCE; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.SPARK_WRITE_UUID; +import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_TASKS_SUCCEEDED; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; import static org.apache.hadoop.test.LambdaTestUtils.*; /** @@ -307,14 +319,19 @@ public AbstractS3ACommitter createCommitter(TaskAttemptContext context) * @param context task * @throws IOException IO failure * @throws InterruptedException write interrupted + * @return the path written to */ - protected void writeTextOutput(TaskAttemptContext context) + protected Path writeTextOutput(TaskAttemptContext context) throws IOException, InterruptedException { describe("write output"); try (DurationInfo d = new DurationInfo(LOG, "Writing Text output for task %s", context.getTaskAttemptID())) { - writeOutput(new LoggingTextOutputFormat().getRecordWriter(context), + LoggingTextOutputFormat.LoggingLineRecordWriter + recordWriter = new LoggingTextOutputFormat<>().getRecordWriter( context); + writeOutput(recordWriter, + context); + return recordWriter.getDest(); } } @@ -371,6 +388,7 @@ public static class JobData { private final TaskAttemptContext tContext; private final AbstractS3ACommitter committer; private final Configuration conf; + private Path writtenTextPath; // null if not written to public JobData(Job job, JobContext jContext, @@ -461,7 +479,7 @@ protected JobData startJob(CommitterFactory factory, boolean writeText) if (writeText) { // write output - writeTextOutput(tContext); 
+ jobData.writtenTextPath = writeTextOutput(tContext); } return jobData; } @@ -480,11 +498,17 @@ protected void setup(JobData jobData) throws IOException { "setup job %s", jContext.getJobID())) { committer.setupJob(jContext); } + setupCommitter(committer, tContext); + describe("setup complete\n"); + } + + private void setupCommitter( + final AbstractS3ACommitter committer, + final TaskAttemptContext tContext) throws IOException { try (DurationInfo d = new DurationInfo(LOG, "setup task %s", tContext.getTaskAttemptID())) { committer.setupTask(tContext); } - describe("setup complete\n"); } /** @@ -647,12 +671,14 @@ protected void assertJobAttemptPathDoesNotExist( * file existence and contents, as well as optionally, the success marker. * @param dir directory to scan. * @param expectSuccessMarker check the success marker? + * @param expectedJobId job ID, verified if non-empty and success data loaded * @throws Exception failure. */ - private void validateContent(Path dir, boolean expectSuccessMarker) - throws Exception { + private void validateContent(Path dir, + boolean expectSuccessMarker, + String expectedJobId) throws Exception { if (expectSuccessMarker) { - verifySuccessMarker(dir); + SuccessData successData = verifySuccessMarker(dir, expectedJobId); } Path expectedFile = getPart0000(dir); log().debug("Validating content in {}", expectedFile); @@ -781,7 +807,8 @@ public void testCommitLifecycle() throws Exception { // validate output describe("4. Validating content"); - validateContent(outDir, shouldExpectSuccessMarker()); + validateContent(outDir, shouldExpectSuccessMarker(), + committer.getUUID()); assertNoMultipartUploadsPending(outDir); } @@ -798,7 +825,8 @@ public void testCommitterWithDuplicatedCommit() throws Exception { commit(committer, jContext, tContext); // validate output - validateContent(outDir, shouldExpectSuccessMarker()); + validateContent(outDir, shouldExpectSuccessMarker(), + committer.getUUID()); assertNoMultipartUploadsPending(outDir); @@ -806,6 +834,75 @@ public void testCommitterWithDuplicatedCommit() throws Exception { expectFNFEonTaskCommit(committer, tContext); } + /** + * HADOOP-17258. If a second task attempt is committed, it + * must succeed, and the output of the first TA, even if already + * committed, MUST NOT be visible in the final output. + *

    + * What's important is not just that only one TA must succeed, + * but it must be the last one executed. Why? because that's + * the one + */ + @Test + public void testTwoTaskAttemptsCommit() throws Exception { + describe("Commit two task attempts;" + + " expect the second attempt to succeed."); + JobData jobData = startJob(false); + JobContext jContext = jobData.jContext; + TaskAttemptContext tContext = jobData.tContext; + AbstractS3ACommitter committer = jobData.committer; + // do commit + describe("\ncommitting task"); + // write output for TA 1, + Path outputTA1 = writeTextOutput(tContext); + + // speculatively execute committer 2. + + // jobconf with a different base to its parts. + Configuration conf2 = jobData.conf; + conf2.set("mapreduce.output.basename", "attempt2"); + String attempt2 = "attempt_" + jobId + "_m_000000_1"; + TaskAttemptID ta2 = TaskAttemptID.forName(attempt2); + TaskAttemptContext tContext2 = new TaskAttemptContextImpl( + conf2, ta2); + + AbstractS3ACommitter committer2 = standardCommitterFactory + .createCommitter(tContext2); + setupCommitter(committer2, tContext2); + // write output for TA 2, + Path outputTA2 = writeTextOutput(tContext2); + + // verify the names are different. + String name1 = outputTA1.getName(); + String name2 = outputTA2.getName(); + Assertions.assertThat(name1) + .describedAs("name of task attempt output %s", outputTA1) + .isNotEqualTo(name2); + + // commit task 1 + committer.commitTask(tContext); + + // then pretend that task1 didn't respond, so + // commit task 2 + committer2.commitTask(tContext2); + + // and the job + committer2.commitJob(tContext); + + // validate output + S3AFileSystem fs = getFileSystem(); + SuccessData successData = validateSuccessFile(outDir, "", fs, "query", 1, + ""); + Assertions.assertThat(successData.getFilenames()) + .describedAs("Files committed") + .hasSize(1); + + assertPathExists("attempt2 output", new Path(outDir, name2)); + assertPathDoesNotExist("attempt1 output", new Path(outDir, name1)); + + assertNoMultipartUploadsPending(outDir); + } + protected boolean shouldExpectSuccessMarker() { return true; } @@ -831,7 +928,8 @@ public void testCommitterWithFailure() throws Exception { commitJob(committer, jContext); // but the data got there, due to the order of operations. 
- validateContent(outDir, shouldExpectSuccessMarker()); + validateContent(outDir, shouldExpectSuccessMarker(), + committer.getUUID()); expectJobCommitToFail(jContext, committer); } @@ -927,7 +1025,7 @@ public void testMapFileOutputCommitter() throws Exception { describe("\nvalidating"); // validate output - verifySuccessMarker(outDir); + verifySuccessMarker(outDir, committer.getUUID()); describe("validate output of %s", outDir); validateMapFileOutputContent(fs, outDir); @@ -1189,7 +1287,7 @@ public Path getDefaultWorkFile( // validate output // There's no success marker in the subdirectory - validateContent(outSubDir, false); + validateContent(outSubDir, false, ""); } /** @@ -1237,17 +1335,37 @@ public void testOutputFormatIntegration() throws Throwable { = outputFormat.getRecordWriter(tContext); IntWritable iw = new IntWritable(1); recordWriter.write(iw, iw); + long expectedLength = 4; Path dest = recordWriter.getDest(); - validateTaskAttemptPathDuringWrite(dest); + validateTaskAttemptPathDuringWrite(dest, expectedLength); recordWriter.close(tContext); // at this point - validateTaskAttemptPathAfterWrite(dest); + validateTaskAttemptPathAfterWrite(dest, expectedLength); assertTrue("Committer does not have data to commit " + committer, committer.needsTaskCommit(tContext)); commitTask(committer, tContext); + // at this point the committer tasks stats should be current. + IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot( + committer.getIOStatistics()); + String commitsCompleted = COMMITTER_TASKS_SUCCEEDED.getSymbol(); + assertThatStatisticCounter(snapshot, commitsCompleted) + .describedAs("task commit count") + .isEqualTo(1L); + + commitJob(committer, jContext); + LOG.info("committer iostatistics {}", + ioStatisticsSourceToString(committer)); + // validate output - verifySuccessMarker(outDir); + SuccessData successData = verifySuccessMarker(outDir, committer.getUUID()); + + // the task commit count should get through the job commit + IOStatisticsSnapshot successStats = successData.getIOStatistics(); + LOG.info("loaded statistics {}", successStats); + assertThatStatisticCounter(successStats, commitsCompleted) + .describedAs("task commit count") + .isEqualTo(1L); } /** @@ -1307,7 +1425,9 @@ public void testParallelJobsToAdjacentPaths() throws Throwable { assertNotEquals(job1Dest, job2Dest); // create the second job - Job job2 = newJob(job2Dest, new JobConf(getConfiguration()), attempt20); + Job job2 = newJob(job2Dest, + unsetUUIDOptions(new JobConf(getConfiguration())), + attempt20); Configuration conf2 = job2.getConfiguration(); conf2.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1); try { @@ -1320,7 +1440,13 @@ public void testParallelJobsToAdjacentPaths() throws Throwable { setup(jobData2); abortInTeardown(jobData2); // make sure the directories are different - assertEquals(job2Dest, committer2.getOutputPath()); + assertNotEquals("Committer output paths", + committer1.getOutputPath(), + committer2.getOutputPath()); + + assertNotEquals("job UUIDs", + committer1.getUUID(), + committer2.getUUID()); // job2 setup, write some data there writeTextOutput(tContext2); @@ -1350,6 +1476,259 @@ public void testParallelJobsToAdjacentPaths() throws Throwable { } + + /** + * Run two jobs with the same destination and different output paths. + *

+   * This only works if the jobs are configured NOT to abort all outstanding
+   * uploads under the destination path
+   * (FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS = false).
+   *

    + * See HADOOP-17318. + */ + @Test + public void testParallelJobsToSameDestination() throws Throwable { + + describe("Run two jobs to the same destination, assert they both complete"); + Configuration conf = getConfiguration(); + conf.setBoolean(FS_S3A_COMMITTER_ABORT_PENDING_UPLOADS, false); + + // this job has a job ID generated and set as the spark UUID; + // the config is also set to require it. + // This mimics the Spark setup process. + + String stage1Id = UUID.randomUUID().toString(); + conf.set(SPARK_WRITE_UUID, stage1Id); + conf.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); + + // create the job and write data in its task attempt + JobData jobData = startJob(true); + Job job1 = jobData.job; + AbstractS3ACommitter committer1 = jobData.committer; + JobContext jContext1 = jobData.jContext; + TaskAttemptContext tContext1 = jobData.tContext; + Path job1TaskOutputFile = jobData.writtenTextPath; + + // the write path + Assertions.assertThat(committer1.getWorkPath().toString()) + .describedAs("Work path path of %s", committer1) + .contains(stage1Id); + // now build up a second job + String jobId2 = randomJobId(); + + // second job will use same ID + String attempt2 = taskAttempt0.toString(); + TaskAttemptID taskAttempt2 = taskAttempt0; + + // create the second job + Configuration c2 = unsetUUIDOptions(new JobConf(conf)); + c2.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); + Job job2 = newJob(outDir, + c2, + attempt2); + Configuration jobConf2 = job2.getConfiguration(); + jobConf2.set("mapreduce.output.basename", "task2"); + String stage2Id = UUID.randomUUID().toString(); + jobConf2.set(SPARK_WRITE_UUID, + stage2Id); + + JobContext jContext2 = new JobContextImpl(jobConf2, + taskAttempt2.getJobID()); + TaskAttemptContext tContext2 = + new TaskAttemptContextImpl(jobConf2, taskAttempt2); + AbstractS3ACommitter committer2 = createCommitter(outDir, tContext2); + Assertions.assertThat(committer2.getJobAttemptPath(jContext2)) + .describedAs("Job attempt path of %s", committer2) + .isNotEqualTo(committer1.getJobAttemptPath(jContext1)); + Assertions.assertThat(committer2.getTaskAttemptPath(tContext2)) + .describedAs("Task attempt path of %s", committer2) + .isNotEqualTo(committer1.getTaskAttemptPath(tContext1)); + Assertions.assertThat(committer2.getWorkPath().toString()) + .describedAs("Work path path of %s", committer2) + .isNotEqualTo(committer1.getWorkPath().toString()) + .contains(stage2Id); + Assertions.assertThat(committer2.getUUIDSource()) + .describedAs("UUID source of %s", committer2) + .isEqualTo(AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID); + JobData jobData2 = new JobData(job2, jContext2, tContext2, committer2); + setup(jobData2); + abortInTeardown(jobData2); + + // the sequence is designed to ensure that job2 has active multipart + // uploads during/after job1's work + + // if the committer is a magic committer, MPUs start in the write, + // otherwise in task commit. + boolean multipartInitiatedInWrite = + committer2 instanceof MagicS3GuardCommitter; + + // job2. Here we start writing a file and have that write in progress + // when job 1 commits. 
+ + LoggingTextOutputFormat.LoggingLineRecordWriter + recordWriter2 = new LoggingTextOutputFormat<>().getRecordWriter( + tContext2); + + LOG.info("Commit Task 1"); + commitTask(committer1, tContext1); + + if (multipartInitiatedInWrite) { + // magic committer runs -commit job1 while a job2 TA has an open + // writer (and hence: open MP Upload) + LOG.info("With Multipart Initiated In Write: Commit Job 1"); + commitJob(committer1, jContext1); + } + + // job2/task writes its output to the destination and + // closes the file + writeOutput(recordWriter2, tContext2); + + // get the output file + Path job2TaskOutputFile = recordWriter2.getDest(); + + + // commit the second task + LOG.info("Commit Task 2"); + commitTask(committer2, tContext2); + + if (!multipartInitiatedInWrite) { + // if not a magic committer, commit the job now. Because at + // this point the staging committer tasks from job2 will be pending + LOG.info("With Multipart NOT Initiated In Write: Commit Job 1"); + assertJobAttemptPathExists(committer1, jContext1); + commitJob(committer1, jContext1); + } + + // run the warning scan code, which will find output. + // this can be manually reviewed in the logs to verify + // readability + committer2.warnOnActiveUploads(outDir); + // and second job + LOG.info("Commit Job 2"); + assertJobAttemptPathExists(committer2, jContext2); + commitJob(committer2, jContext2); + + // validate the output + Path job1Output = new Path(outDir, job1TaskOutputFile.getName()); + Path job2Output = new Path(outDir, job2TaskOutputFile.getName()); + assertNotEquals("Job output file filenames must be different", + job1Output, job2Output); + + // job1 output must be there + assertPathExists("job 1 output", job1Output); + // job 2 file is there + assertPathExists("job 2 output", job2Output); + + // and nothing is pending + assertNoMultipartUploadsPending(outDir); + + } + + /** + * Verify self-generated UUID logic. + * A committer used for job setup can also use it for task setup, + * but a committer which generated a job ID but was only + * used for task setup -that is rejected. + * Task abort will still work. + */ + @Test + public void testSelfGeneratedUUID() throws Throwable { + describe("Run two jobs to the same destination, assert they both complete"); + Configuration conf = getConfiguration(); + + unsetUUIDOptions(conf); + // job is set to generate UUIDs + conf.setBoolean(FS_S3A_COMMITTER_GENERATE_UUID, true); + + // create the job. don't write anything + JobData jobData = startJob(false); + AbstractS3ACommitter committer = jobData.committer; + String uuid = committer.getUUID(); + Assertions.assertThat(committer.getUUIDSource()) + .describedAs("UUID source of %s", committer) + .isEqualTo(AbstractS3ACommitter.JobUUIDSource.GeneratedLocally); + + // examine the job configuration and verify that it has been updated + Configuration jobConf = jobData.conf; + Assertions.assertThat(jobConf.get(FS_S3A_COMMITTER_UUID, null)) + .describedAs("Config option " + FS_S3A_COMMITTER_UUID) + .isEqualTo(uuid); + Assertions.assertThat(jobConf.get(FS_S3A_COMMITTER_UUID_SOURCE, null)) + .describedAs("Config option " + FS_S3A_COMMITTER_UUID_SOURCE) + .isEqualTo(AbstractS3ACommitter.JobUUIDSource.GeneratedLocally + .getText()); + + // because the task was set up in the job, it can have task + // setup called, even though it had a random ID. 
+ committer.setupTask(jobData.tContext); + + // but a new committer will not be set up + TaskAttemptContext tContext2 = + new TaskAttemptContextImpl(conf, taskAttempt1); + AbstractS3ACommitter committer2 = createCommitter(outDir, tContext2); + Assertions.assertThat(committer2.getUUIDSource()) + .describedAs("UUID source of %s", committer2) + .isEqualTo(AbstractS3ACommitter.JobUUIDSource.GeneratedLocally); + assertNotEquals("job UUIDs", + committer.getUUID(), + committer2.getUUID()); + // Task setup MUST fail. + intercept(PathCommitException.class, + E_SELF_GENERATED_JOB_UUID, () -> { + committer2.setupTask(tContext2); + return committer2; + }); + // task abort with the self-generated option is fine. + committer2.abortTask(tContext2); + } + + /** + * Verify the option to require a UUID applies and + * when a committer is instantiated without those options, + * it fails early. + */ + @Test + public void testRequirePropagatedUUID() throws Throwable { + Configuration conf = getConfiguration(); + + unsetUUIDOptions(conf); + conf.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); + conf.setBoolean(FS_S3A_COMMITTER_GENERATE_UUID, true); + + // create the job, expect a failure, even if UUID generation + // is enabled. + intercept(PathCommitException.class, E_NO_SPARK_UUID, () -> + startJob(false)); + } + + /** + * Strip staging/spark UUID options. + * @param conf config + * @return the patched config + */ + protected Configuration unsetUUIDOptions(final Configuration conf) { + conf.unset(SPARK_WRITE_UUID); + conf.unset(FS_S3A_COMMITTER_UUID); + conf.unset(FS_S3A_COMMITTER_GENERATE_UUID); + conf.unset(FS_S3A_COMMITTER_REQUIRE_UUID); + return conf; + } + + /** + * Assert that a committer's job attempt path exists. + * For the staging committers, this is in the cluster FS. + * @param committer committer + * @param jobContext job context + * @throws IOException failure + */ + protected void assertJobAttemptPathExists( + final AbstractS3ACommitter committer, + final JobContext jobContext) throws IOException { + Path attemptPath = committer.getJobAttemptPath(jobContext); + ContractTestUtils.assertIsDirectory( + attemptPath.getFileSystem(committer.getConf()), + attemptPath); + } + @Test public void testS3ACommitterFactoryBinding() throws Throwable { describe("Verify that the committer factory returns this " @@ -1372,9 +1751,11 @@ public void testS3ACommitterFactoryBinding() throws Throwable { * Validate the path of a file being written to during the write * itself. * @param p path + * @param expectedLength * @throws IOException IO failure */ - protected void validateTaskAttemptPathDuringWrite(Path p) throws IOException { + protected void validateTaskAttemptPathDuringWrite(Path p, + final long expectedLength) throws IOException { } @@ -1382,9 +1763,11 @@ protected void validateTaskAttemptPathDuringWrite(Path p) throws IOException { * Validate the path of a file being written to after the write * operation has completed. 
* @param p path + * @param expectedLength * @throws IOException IO failure */ - protected void validateTaskAttemptPathAfterWrite(Path p) throws IOException { + protected void validateTaskAttemptPathAfterWrite(Path p, + final long expectedLength) throws IOException { } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java index 783c62686bad7..1b752a8db97f1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java @@ -39,7 +39,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; import static org.apache.hadoop.fs.s3a.S3ATestUtils.deployService; import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBool; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index d199337df1fe9..4d7f81d019b74 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -25,7 +25,8 @@ import java.util.List; import com.amazonaws.services.s3.model.PartETag; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.assertj.core.api.Assertions; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,6 +53,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; +import static org.apache.hadoop.fs.s3a.commit.CommitOperations.extractMagicFileLength; import static org.apache.hadoop.fs.s3a.commit.CommitUtils.*; import static org.apache.hadoop.fs.s3a.commit.MagicCommitPaths.*; import static org.apache.hadoop.fs.s3a.Constants.*; @@ -173,6 +175,7 @@ public void testCreateAbortEmptyFile() throws Throwable { Path destFile = methodPath(filename); Path pendingFilePath = makeMagic(destFile); touch(fs, pendingFilePath); + waitForConsistency(); validateIntermediateAndFinalPaths(pendingFilePath, destFile); Path pendingDataPath = validatePendingCommitData(filename, pendingFilePath); @@ -216,13 +219,13 @@ private static Path makeMagic(Path destFile) { @Test public void testCommitEmptyFile() throws Throwable { - describe("create then commit an empty file"); + describe("create then commit an empty magic file"); createCommitAndVerify("empty-commit.txt", new byte[0]); } @Test public void testCommitSmallFile() throws Throwable { - describe("create then commit an empty file"); + describe("create then commit a small magic file"); createCommitAndVerify("small-commit.txt", DATASET); } @@ -288,6 +291,64 @@ public void testBaseRelativePath() throws Throwable { commit("child.txt", pendingChildPath, expectedDestPath, 0, 0); } + /** + * Verify that that when a marker file is renamed, its + * magic 
marker attribute is lost. + */ + @Test + public void testMarkerFileRename() + throws Exception { + S3AFileSystem fs = getFileSystem(); + Path destFile = methodPath(); + Path destDir = destFile.getParent(); + fs.delete(destDir, true); + Path magicDest = makeMagic(destFile); + Path magicDir = magicDest.getParent(); + fs.mkdirs(magicDir); + + // use the builder API to verify it works exactly the + // same. + try (FSDataOutputStream stream = fs.createFile(magicDest) + .overwrite(true) + .recursive() + .build()) { + assertIsMagicStream(stream); + stream.write(DATASET); + } + Path magic2 = new Path(magicDir, "magic2"); + // rename the marker + fs.rename(magicDest, magic2); + + // the renamed file has no header + Assertions.assertThat(extractMagicFileLength(fs, magic2)) + .describedAs("XAttribute " + XA_MAGIC_MARKER + " of " + magic2) + .isEmpty(); + // abort the upload, which is driven by the .pending files + // there must be 1 deleted file; during test debugging with aborted + // runs there may be more. + Assertions.assertThat(newCommitOperations() + .abortPendingUploadsUnderPath(destDir)) + .describedAs("Aborting all pending uploads under %s", destDir) + .isGreaterThanOrEqualTo(1); + } + + /** + * Assert that an output stream is magic. + * @param stream stream to probe. + */ + protected void assertIsMagicStream(final FSDataOutputStream stream) { + Assertions.assertThat(stream.hasCapability(STREAM_CAPABILITY_MAGIC_OUTPUT)) + .describedAs("Stream capability %s in stream %s", + STREAM_CAPABILITY_MAGIC_OUTPUT, stream) + .isTrue(); + } + + /** + * Create a file through the magic commit mechanism. + * @param filename file to create (with __magic path.) + * @param data data to write + * @throws Exception failure + */ private void createCommitAndVerify(String filename, byte[] data) throws Exception { S3AFileSystem fs = getFileSystem(); @@ -295,19 +356,30 @@ private void createCommitAndVerify(String filename, byte[] data) fs.delete(destFile.getParent(), true); Path magicDest = makeMagic(destFile); assertPathDoesNotExist("Magic file should not exist", magicDest); + long dataSize = data != null ? 
data.length : 0; try(FSDataOutputStream stream = fs.create(magicDest, true)) { - assertTrue(stream.hasCapability(STREAM_CAPABILITY_MAGIC_OUTPUT)); - if (data != null && data.length > 0) { + assertIsMagicStream(stream); + if (dataSize > 0) { stream.write(data); } stream.close(); } FileStatus status = getFileStatusEventually(fs, magicDest, CONSISTENCY_WAIT); - assertEquals("Non empty marker file: " + status, 0, status.getLen()); - + assertEquals("Magic marker file is not zero bytes: " + status, + 0, 0); + Assertions.assertThat(extractMagicFileLength(fs, + magicDest)) + .describedAs("XAttribute " + XA_MAGIC_MARKER + " of " + magicDest) + .isNotEmpty() + .hasValue(dataSize); commit(filename, destFile, HIGH_THROTTLE, 0); verifyFileContents(fs, destFile, data); + // the destination file doesn't have the attribute + Assertions.assertThat(extractMagicFileLength(fs, + destFile)) + .describedAs("XAttribute " + XA_MAGIC_MARKER + " of " + destFile) + .isEmpty(); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/LoggingTextOutputFormat.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/LoggingTextOutputFormat.java index 1ac8038895200..5d1e919d917db 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/LoggingTextOutputFormat.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/LoggingTextOutputFormat.java @@ -66,7 +66,7 @@ public LoggingLineRecordWriter getRecordWriter(TaskAttemptContext job) } Path file = getDefaultWorkFile(job, extension); FileSystem fs = file.getFileSystem(conf); - FSDataOutputStream fileOut = fs.create(file, false); + FSDataOutputStream fileOut = fs.create(file, true); LOG.debug("Creating LineRecordWriter with destination {}", file); if (isCompressed) { return new LoggingLineRecordWriter<>( diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/MiniDFSClusterService.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/MiniDFSClusterService.java index 7f689e0d3d2d1..6e6ecd1a46144 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/MiniDFSClusterService.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/MiniDFSClusterService.java @@ -39,7 +39,6 @@ public MiniDFSClusterService() { @Override protected void serviceInit(Configuration conf) throws Exception { - conf.setBoolean("dfs.webhdfs.enabled", false); super.serviceInit(conf); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java index 47d112ddc5930..073922cbc0e5f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java @@ -22,7 +22,7 @@ import java.util.Arrays; import java.util.List; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.Assert; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java index 4ee39f1bfa08e..7ff5c3d280938 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestTasks.java @@ -25,12 +25,13 @@ import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -57,6 +58,12 @@ public class TestTasks extends HadoopTestBase { * Thread pool for task execution. */ private ExecutorService threadPool; + + /** + * Task submitter bonded to the thread pool, or + * null for the 0-thread case. + */ + Tasks.Submitter submitter; private final CounterTask failingTask = new CounterTask("failing committer", FAILPOINT, Item::commit); @@ -117,6 +124,9 @@ public void setup() { .setDaemon(true) .setNameFormat(getMethodName() + "-pool-%d") .build()); + submitter = new PoolSubmitter(); + } else { + submitter = null; } } @@ -129,12 +139,21 @@ public void teardown() { } } + private class PoolSubmitter implements Tasks.Submitter { + + @Override + public Future submit(final Runnable task) { + return threadPool.submit(task); + } + + } + /** * create the builder. * @return pre-inited builder */ private Tasks.Builder builder() { - return Tasks.foreach(items).executeWith(threadPool); + return Tasks.foreach(items).executeWith(submitter); } private void assertRun(Tasks.Builder builder, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java index caf54d1c36099..79838d6f00b46 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java @@ -34,7 +34,7 @@ import java.util.UUID; import java.util.stream.Collectors; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.assertj.core.api.Assertions; import org.junit.FixMethodOrder; import org.junit.Rule; @@ -77,10 +77,9 @@ import static org.apache.hadoop.fs.s3a.commit.CommitConstants.FS_S3A_COMMITTER_STAGING_TMP_PATH; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC; import static org.apache.hadoop.fs.s3a.commit.CommitConstants._SUCCESS; -import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.FS_S3A_COMMITTER_STAGING_UUID; +import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.FS_S3A_COMMITTER_UUID; import static org.apache.hadoop.fs.s3a.commit.staging.Paths.getMultipartUploadCommitsDirectory; import static org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterConstants.STAGING_UPLOADS; -import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Test an MR Job with all the different committers. 
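A recurring theme in the commit tests changed above (createCommitAndVerify() and testMarkerFileRename(), and validateTaskAttemptPathAfterWrite() further down) is the magic-marker length header: the object written at the __magic path stays zero bytes long, while the byte count of the data to be committed is carried in the XA_MAGIC_MARKER attribute and read back via CommitOperations.extractMagicFileLength(). A minimal sketch of that probe, assuming (as the hasValue()/isEmpty() assertions imply) that the helper returns an Optional length; fs, magicPath, renamedPath and expectedBytes are placeholders:

    // illustrative sketch only, not part of the patch
    FileStatus st = fs.getFileStatus(magicPath);
    assertEquals("marker object should be zero bytes: " + st, 0, st.getLen());
    // the real length travels in the XA_MAGIC_MARKER header
    Optional<Long> len = CommitOperations.extractMagicFileLength(fs, magicPath);
    assertTrue("no magic length header on " + magicPath, len.isPresent());
    assertEquals(expectedBytes, len.get().longValue());
    // a plain rename drops the header, so the renamed copy reports no length
    fs.rename(magicPath, renamedPath);
    assertFalse("header should not survive rename",
        CommitOperations.extractMagicFileLength(fs, renamedPath).isPresent());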
@@ -254,7 +253,7 @@ public void test_200_execute() throws Exception { jobConf.set("mock-results-file", committerPath); // setting up staging options is harmless for other committers - jobConf.set(FS_S3A_COMMITTER_STAGING_UUID, commitUUID); + jobConf.set(FS_S3A_COMMITTER_UUID, commitUUID); mrJob.setInputFormatClass(TextInputFormat.class); FileInputFormat.addInputPath(mrJob, @@ -310,7 +309,8 @@ public void test_200_execute() throws Exception { committerName(), fs, "MR job " + jobID, - 1); + 1, + ""); String commitData = successData.toString(); FileStatus[] results = fs.listStatus(outputPath, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java index 057adf5341b65..2b2fc2bb446d7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java @@ -20,21 +20,30 @@ import java.io.IOException; import java.net.URI; +import java.util.List; + +import org.assertj.core.api.Assertions; +import org.junit.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.commit.AbstractITCommitProtocol; import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter; import org.apache.hadoop.fs.s3a.commit.CommitConstants; +import org.apache.hadoop.fs.s3a.commit.CommitOperations; import org.apache.hadoop.fs.s3a.commit.CommitUtils; import org.apache.hadoop.fs.s3a.commit.CommitterFaultInjection; import org.apache.hadoop.fs.s3a.commit.CommitterFaultInjectionImpl; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; +import static org.apache.hadoop.fs.s3a.S3AUtils.listAndFilter; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.hamcrest.CoreMatchers.containsString; @@ -57,13 +66,6 @@ public boolean useInconsistentClient() { return false; } - @Override - protected Configuration createConfiguration() { - Configuration conf = super.createConfiguration(); - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); - return conf; - } - @Override protected String getCommitterFactoryName() { return CommitConstants.S3A_COMMITTER_FACTORY; @@ -91,30 +93,56 @@ public void assertJobAbortCleanedUp(JobData jobData) } @Override - protected AbstractS3ACommitter createCommitter( + protected MagicS3GuardCommitter createCommitter( Path outputPath, TaskAttemptContext context) throws IOException { return new MagicS3GuardCommitter(outputPath, context); } - public AbstractS3ACommitter createFailingCommitter( + public MagicS3GuardCommitter createFailingCommitter( TaskAttemptContext tContext) throws IOException { return new CommitterWithFailedThenSucceed(getOutDir(), tContext); } - protected void validateTaskAttemptPathDuringWrite(Path p) throws IOException { + protected void validateTaskAttemptPathDuringWrite(Path p, + final long expectedLength) throws IOException { String pathStr = p.toString(); assertTrue("not magic " + pathStr, pathStr.contains(MAGIC)); assertPathDoesNotExist("task 
attempt visible", p); } - protected void validateTaskAttemptPathAfterWrite(Path p) throws IOException { - FileStatus st = getFileSystem().getFileStatus(p); - assertEquals("file length in " + st, 0, st.getLen()); - Path pendingFile = new Path(p.toString() + PENDING_SUFFIX); + protected void validateTaskAttemptPathAfterWrite(Path marker, + final long expectedLength) throws IOException { + // the pending file exists + Path pendingFile = new Path(marker.toString() + PENDING_SUFFIX); assertPathExists("pending file", pendingFile); + S3AFileSystem fs = getFileSystem(); + + // THIS SEQUENCE MUST BE RUN IN ORDER ON A S3GUARDED + // STORE + // if you list the parent dir and find the marker, it + // is really 0 bytes long + String name = marker.getName(); + List filtered = listAndFilter(fs, + marker.getParent(), false, + (path) -> path.getName().equals(name)); + Assertions.assertThat(filtered) + .hasSize(1); + Assertions.assertThat(filtered.get(0)) + .matches(lst -> lst.getLen() == 0, + "Listing should return 0 byte length"); + + // marker file is empty + FileStatus st = fs.getFileStatus(marker); + assertEquals("file length in " + st, 0, st.getLen()); + // xattr header + Assertions.assertThat(CommitOperations.extractMagicFileLength(fs, + marker)) + .describedAs("XAttribute " + XA_MAGIC_MARKER) + .isNotEmpty() + .hasValue(expectedLength); } /** @@ -136,6 +164,41 @@ protected void validateTaskAttemptWorkingDirectory( containsString('/' + CommitConstants.MAGIC + '/')); } + /** + * Verify that the __magic path for the application/tasks use the + * committer UUID to ensure uniqueness in the case of more than + * one job writing to the same destination path. + */ + @Test + public void testCommittersPathsHaveUUID() throws Throwable { + TaskAttemptContext tContext = new TaskAttemptContextImpl( + getConfiguration(), + getTaskAttempt0()); + MagicS3GuardCommitter committer = createCommitter(getOutDir(), tContext); + + String ta0 = getTaskAttempt0().toString(); + // magic path for the task attempt + Path taskAttemptPath = committer.getTaskAttemptPath(tContext); + Assertions.assertThat(taskAttemptPath.toString()) + .describedAs("task path of %s", committer) + .contains(committer.getUUID()) + .contains(MAGIC) + .doesNotContain(TEMP_DATA) + .endsWith(BASE) + .contains(ta0); + + // temp path for files which the TA will create with an absolute path + // and which need renaming into place. + Path tempTaskAttemptPath = committer.getTempTaskAttemptPath(tContext); + Assertions.assertThat(tempTaskAttemptPath.toString()) + .describedAs("Temp task path of %s", committer) + .contains(committer.getUUID()) + .contains(TEMP_DATA) + .doesNotContain(MAGIC) + .doesNotContain(BASE) + .contains(ta0); + } + /** * The class provides a overridden implementation of commitJobInternal which * causes the commit failed for the first time then succeed. 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java index 9a2ad0ee9da10..3c15454e7edfb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java @@ -26,7 +26,6 @@ import org.slf4j.LoggerFactory; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; @@ -83,17 +82,6 @@ public String getTestSuiteName() { return "ITestS3AHugeMagicCommits"; } - /** - * Create the scale IO conf with the committer enabled. - * @return the configuration to use for the test FS. - */ - @Override - protected Configuration createScaleConfiguration() { - Configuration conf = super.createScaleConfiguration(); - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); - return conf; - } - @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/MockedStagingCommitter.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/MockedStagingCommitter.java index 47383b78a4d3b..d3da8185c8d65 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/MockedStagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/MockedStagingCommitter.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.s3a.MockS3AFileSystem; import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.ClientErrors; import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.ClientResults; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.TaskAttemptContext; @@ -76,7 +77,8 @@ public void commitJob(JobContext context) throws IOException { @Override protected void maybeCreateSuccessMarker(JobContext context, - List filenames) + List filenames, + final IOStatisticsSnapshot ioStatistics) throws IOException { //skipped } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java index f368bf25c77c7..4e425583a3cfd 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java @@ -44,8 +44,8 @@ import com.amazonaws.services.s3.model.MultipartUploadListing; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -341,7 +341,7 @@ public void setupJob() throws Exception { protected JobConf createJobConf() { JobConf conf = new JobConf(); - conf.set(InternalCommitterConstants.FS_S3A_COMMITTER_STAGING_UUID, + 
conf.set(InternalCommitterConstants.FS_S3A_COMMITTER_UUID, UUID.randomUUID().toString()); conf.setBoolean( CommitConstants.CREATE_SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, @@ -401,7 +401,7 @@ public void setupTask() throws Exception { // get the task's configuration copy so modifications take effect String tmp = System.getProperty( - StagingCommitterConstants.JAVA_IO_TMPDIR); + InternalCommitterConstants.JAVA_IO_TMPDIR); tempDir = new File(tmp); tac.getConfiguration().set(Constants.BUFFER_DIR, tmp + "/buffer"); tac.getConfiguration().set( diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestDirectoryCommitterScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestDirectoryCommitterScale.java index 6d93e5fa788ff..cb7202b10d133 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestDirectoryCommitterScale.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestDirectoryCommitterScale.java @@ -28,7 +28,7 @@ import java.util.stream.IntStream; import com.amazonaws.services.s3.model.PartETag; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.assertj.core.api.Assertions; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java index 15ea75476a93d..84eb67bc18fa5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java @@ -34,7 +34,7 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.assertj.core.api.Assertions; import org.hamcrest.core.StringStartsWith; import org.junit.After; @@ -54,6 +54,8 @@ import org.apache.hadoop.fs.s3a.AWSClientIOException; import org.apache.hadoop.fs.s3a.MockS3AFileSystem; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitter; +import org.apache.hadoop.fs.s3a.commit.PathCommitException; import org.apache.hadoop.fs.s3a.commit.files.PendingSet; import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit; import org.apache.hadoop.mapred.JobConf; @@ -84,8 +86,13 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest { private static final JobID JOB_ID = new JobID("job", 1); + + public static final TaskID TASK_ID = new TaskID(JOB_ID, TaskType.REDUCE, 2); + private static final TaskAttemptID AID = new TaskAttemptID( - new TaskID(JOB_ID, TaskType.REDUCE, 2), 3); + TASK_ID, 1); + private static final TaskAttemptID AID2 = new TaskAttemptID( + TASK_ID, 2); private static final Logger LOG = LoggerFactory.getLogger(TestStagingCommitter.class); @@ -141,8 +148,8 @@ public void setupCommitter() throws Exception { jobConf.setInt(FS_S3A_COMMITTER_THREADS, numThreads); jobConf.setBoolean(FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES, uniqueFilenames); - jobConf.set(FS_S3A_COMMITTER_STAGING_UUID, - UUID.randomUUID().toString()); + jobConf.set(FS_S3A_COMMITTER_UUID, + 
uuid()); jobConf.set(RETRY_INTERVAL, "100ms"); jobConf.setInt(RETRY_LIMIT, 1); @@ -190,36 +197,137 @@ public void cleanup() { } } + private Configuration newConfig() { + return new Configuration(false); + } + @Test public void testUUIDPropagation() throws Exception { - Configuration config = new Configuration(); - String jobUUID = addUUID(config); - assertEquals("Upload UUID", jobUUID, - StagingCommitter.getUploadUUID(config, JOB_ID)); + Configuration config = newConfig(); + String uuid = uuid(); + config.set(SPARK_WRITE_UUID, uuid); + config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); + Pair t3 = AbstractS3ACommitter + .buildJobUUID(config, JOB_ID); + assertEquals("Job UUID", uuid, t3.getLeft()); + assertEquals("Job UUID source: " + t3, + AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID, + t3.getRight()); + } + + /** + * If the Spark UUID is required, then binding will fail + * if a UUID did not get passed in. + */ + @Test + public void testUUIDValidation() throws Exception { + Configuration config = newConfig(); + config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); + intercept(PathCommitException.class, E_NO_SPARK_UUID, () -> + AbstractS3ACommitter.buildJobUUID(config, JOB_ID)); + } + + /** + * Validate ordering of UUID retrieval. + */ + @Test + public void testUUIDLoadOrdering() throws Exception { + Configuration config = newConfig(); + config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); + String uuid = uuid(); + // MUST be picked up + config.set(FS_S3A_COMMITTER_UUID, uuid); + config.set(SPARK_WRITE_UUID, "something"); + Pair t3 = AbstractS3ACommitter + .buildJobUUID(config, JOB_ID); + assertEquals("Job UUID", uuid, t3.getLeft()); + assertEquals("Job UUID source: " + t3, + AbstractS3ACommitter.JobUUIDSource.CommitterUUIDProperty, + t3.getRight()); + } + + /** + * Verify that unless the config enables self-generation, JobIDs + * are used. + */ + @Test + public void testJobIDIsUUID() throws Exception { + Configuration config = newConfig(); + Pair t3 = AbstractS3ACommitter + .buildJobUUID(config, JOB_ID); + assertEquals("Job UUID source: " + t3, + AbstractS3ACommitter.JobUUIDSource.JobID, + t3.getRight()); + // parse it as a JobID + JobID.forName(t3.getLeft()); } + /** + * Verify self-generated UUIDs are supported when enabled, + * and come before JobID. + */ + @Test + public void testSelfGeneratedUUID() throws Exception { + Configuration config = newConfig(); + config.setBoolean(FS_S3A_COMMITTER_GENERATE_UUID, true); + Pair t3 = AbstractS3ACommitter + .buildJobUUID(config, JOB_ID); + assertEquals("Job UUID source: " + t3, + AbstractS3ACommitter.JobUUIDSource.GeneratedLocally, + t3.getRight()); + // parse it + UUID.fromString(t3.getLeft()); + } + + /** + * Create a UUID and add it as the staging UUID. + * @param config config to patch + * @return the UUID + */ private String addUUID(Configuration config) { - String jobUUID = UUID.randomUUID().toString(); - config.set(FS_S3A_COMMITTER_STAGING_UUID, jobUUID); + String jobUUID = uuid(); + config.set(FS_S3A_COMMITTER_UUID, jobUUID); return jobUUID; } + /** + * Create a new UUID. + * @return a uuid as a string. 
+ */ + private String uuid() { + return UUID.randomUUID().toString(); + } + @Test public void testAttemptPathConstructionNoSchema() throws Exception { - Configuration config = new Configuration(); + Configuration config = newConfig(); final String jobUUID = addUUID(config); config.set(BUFFER_DIR, "/tmp/mr-local-0,/tmp/mr-local-1"); String commonPath = "file:/tmp/mr-local-"; + Assertions.assertThat(getLocalTaskAttemptTempDir(config, + jobUUID, tac.getTaskAttemptID()).toString()) + .describedAs("Missing scheme should produce local file paths") + .startsWith(commonPath) + .contains(jobUUID); + } - assertThat("Missing scheme should produce local file paths", - getLocalTaskAttemptTempDir(config, - jobUUID, tac.getTaskAttemptID()).toString(), - StringStartsWith.startsWith(commonPath)); + @Test + public void testAttemptPathsDifferentByTaskAttempt() throws Exception { + Configuration config = newConfig(); + final String jobUUID = addUUID(config); + config.set(BUFFER_DIR, "file:/tmp/mr-local-0"); + String attempt1Path = getLocalTaskAttemptTempDir(config, + jobUUID, AID).toString(); + String attempt2Path = getLocalTaskAttemptTempDir(config, + jobUUID, AID2).toString(); + Assertions.assertThat(attempt2Path) + .describedAs("local task attempt dir of TA1 must not match that of TA2") + .isNotEqualTo(attempt1Path); } @Test public void testAttemptPathConstructionWithSchema() throws Exception { - Configuration config = new Configuration(); + Configuration config = newConfig(); final String jobUUID = addUUID(config); String commonPath = "file:/tmp/mr-local-"; @@ -234,7 +342,7 @@ public void testAttemptPathConstructionWithSchema() throws Exception { @Test public void testAttemptPathConstructionWrongSchema() throws Exception { - Configuration config = new Configuration(); + Configuration config = newConfig(); final String jobUUID = addUUID(config); config.set(BUFFER_DIR, "hdfs://nn:8020/tmp/mr-local-0,hdfs://nn:8020/tmp/mr-local-1"); @@ -270,7 +378,7 @@ public void testSingleTaskCommit() throws Exception { assertEquals("Should name the commits file with the task ID: " + results, "task_job_0001_r_000002", stats[0].getPath().getName()); - PendingSet pending = PendingSet.load(dfs, stats[0].getPath()); + PendingSet pending = PendingSet.load(dfs, stats[0]); assertEquals("Should have one pending commit", 1, pending.size()); SinglePendingCommit commit = pending.getCommits().get(0); assertEquals("Should write to the correct bucket:" + results, @@ -310,8 +418,7 @@ public void testSingleTaskEmptyFileCommit() throws Exception { assertEquals("Should name the commits file with the task ID", "task_job_0001_r_000002", stats[0].getPath().getName()); - PendingSet pending = PendingSet.load(dfs, - stats[0].getPath()); + PendingSet pending = PendingSet.load(dfs, stats[0]); assertEquals("Should have one pending commit", 1, pending.size()); } @@ -334,7 +441,7 @@ public void testSingleTaskMultiFileCommit() throws Exception { "task_job_0001_r_000002", stats[0].getPath().getName()); List pending = - PendingSet.load(dfs, stats[0].getPath()).getCommits(); + PendingSet.load(dfs, stats[0]).getCommits(); assertEquals("Should have correct number of pending commits", files.size(), pending.size()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java index cb332b89489c0..ce55480323872 100644 --- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java @@ -28,7 +28,7 @@ import java.util.UUID; import java.util.stream.Collectors; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.After; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedJobCommit.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedJobCommit.java index 872097ff6f032..86b677c70a305 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedJobCommit.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedJobCommit.java @@ -110,7 +110,7 @@ protected ActiveCommit listPendingUploadsToCommit( file.deleteOnExit(); Path path = new Path(file.toURI()); pendingSet.save(localFS, path, true); - activeCommit.add(path); + activeCommit.add(localFS.getFileStatus(path)); } return activeCommit; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java index 4b568263ba71b..2f46b72fbaecb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java @@ -25,8 +25,8 @@ import java.util.UUID; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.assertj.core.api.Assertions; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java index 180e7435222db..826c3cd2743a7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java @@ -19,8 +19,11 @@ package org.apache.hadoop.fs.s3a.commit.staging.integration; import java.io.IOException; +import java.util.UUID; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; @@ -34,6 +37,7 @@ import org.apache.hadoop.fs.s3a.commit.staging.Paths; import org.apache.hadoop.fs.s3a.commit.staging.StagingCommitter; import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.TaskAttemptContext; @@ -68,8 +72,15 @@ public void setup() throws 
Exception { // identify working dir for staging and delete Configuration conf = getConfiguration(); - String uuid = StagingCommitter.getUploadUUID(conf, - getTaskAttempt0().getJobID()); + String uuid = UUID.randomUUID().toString(); + conf.set(InternalCommitterConstants.SPARK_WRITE_UUID, + uuid); + Pair t3 = AbstractS3ACommitter + .buildJobUUID(conf, JobID.forName("job_" + getJobId())); + assertEquals("Job UUID", uuid, t3.getLeft()); + assertEquals("Job UUID source: " + t3, + AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID, + t3.getRight()); Path tempDir = Paths.getLocalTaskAttemptTempDir(conf, uuid, getTaskAttempt0()); rmdir(tempDir, conf); @@ -103,14 +114,20 @@ protected void expectJobCommitToFail(JobContext jContext, IOException.class); } - protected void validateTaskAttemptPathDuringWrite(Path p) throws IOException { + protected void validateTaskAttemptPathDuringWrite(Path p, + final long expectedLength) throws IOException { // this is expected to be local FS ContractTestUtils.assertPathExists(getLocalFS(), "task attempt", p); } - protected void validateTaskAttemptPathAfterWrite(Path p) throws IOException { + protected void validateTaskAttemptPathAfterWrite(Path p, + final long expectedLength) throws IOException { // this is expected to be local FS - ContractTestUtils.assertPathExists(getLocalFS(), "task attempt", p); + // this is expected to be local FS + FileSystem localFS = getLocalFS(); + ContractTestUtils.assertPathExists(localFS, "task attempt", p); + FileStatus st = localFS.getFileStatus(p); + assertEquals("file length in " + st, expectedLength, st.getLen()); } protected FileSystem getLocalFS() throws IOException { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java index dc6c6d19db9ab..32f909231c217 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java @@ -54,7 +54,6 @@ import static java.util.Optional.empty; import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR; -import static org.apache.hadoop.fs.s3a.commit.CommitConstants.MAGIC_COMMITTER_ENABLED; /** * Runs Terasort against S3A. 
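The UUID tests earlier in this patch (TestStagingCommitter and the staging protocol setup above) pin down the lookup order used by AbstractS3ACommitter.buildJobUUID(). The sketch below restates that ordering only; it is illustrative rather than the production implementation, and the literal key strings are assumptions standing in for the FS_S3A_COMMITTER_UUID, SPARK_WRITE_UUID, FS_S3A_COMMITTER_GENERATE_UUID and FS_S3A_COMMITTER_REQUIRE_UUID constants referenced in those tests.

import java.util.UUID;

import org.apache.hadoop.conf.Configuration;

/** Illustrative only: the lookup order the UUID tests assert. */
final class JobUuidResolution {

  // Assumed key values; the tests use the constants, not literals.
  private static final String COMMITTER_UUID = "fs.s3a.committer.uuid";
  private static final String SPARK_WRITE_UUID =
      "spark.sql.sources.writeJobUUID";
  private static final String GENERATE_UUID =
      "fs.s3a.committer.generate.uuid";
  private static final String REQUIRE_UUID =
      "fs.s3a.committer.require.uuid";

  private JobUuidResolution() {
  }

  static String resolve(Configuration conf, String jobId) {
    String explicit = conf.getTrimmed(COMMITTER_UUID, "");
    if (!explicit.isEmpty()) {
      return explicit;                       // CommitterUUIDProperty wins
    }
    String spark = conf.getTrimmed(SPARK_WRITE_UUID, "");
    if (!spark.isEmpty()) {
      return spark;                          // SparkWriteUUID
    }
    if (conf.getBoolean(GENERATE_UUID, false)) {
      return UUID.randomUUID().toString();   // GeneratedLocally
    }
    if (conf.getBoolean(REQUIRE_UUID, false)) {
      // the real code raises PathCommitException with E_NO_SPARK_UUID
      throw new IllegalStateException("No Spark write UUID supplied");
    }
    return jobId;                            // fall back to the JobID
  }
}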
@@ -155,7 +154,6 @@ public void setup() throws Exception { @Override protected void applyCustomConfigOptions(JobConf conf) { // small sample size for faster runs - conf.setBoolean(MAGIC_COMMITTER_ENABLED, true); conf.setInt(TeraSortConfigKeys.SAMPLE_SIZE.key(), getSampleSizeForEachPartition()); conf.setInt(TeraSortConfigKeys.NUM_PARTITIONS.key(), @@ -242,7 +240,7 @@ private void executeStage( + "(" + StringUtils.join(", ", args) + ")" + " failed", 0, result); validateSuccessFile(dest, committerName(), getFileSystem(), stage, - minimumFileCount); + minimumFileCount, ""); completedStage(stage, d); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java index 1b53ef5a9185b..c920be13230dd 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java @@ -32,8 +32,9 @@ import java.util.stream.Stream; import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import com.google.common.base.Charsets; -import com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;; import org.assertj.core.api.Assertions; import org.junit.Test; import org.junit.runner.RunWith; @@ -42,12 +43,12 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.DurationInfo; @@ -57,7 +58,9 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.s3a.S3AUtils.applyLocatedFiles; import static org.apache.hadoop.fs.s3a.Statistic.FILES_DELETE_REJECTED; -import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_DELETE_REQUESTS; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_BULK_DELETE_REQUEST; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_DELETE_OBJECTS; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_DELETE_REQUEST; import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects; import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement; import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory; @@ -71,8 +74,10 @@ import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.extractUndeletedPaths; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.removeUndeletedPaths; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.toPathList; import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertFileCount; import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.extractCause; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; import static 
org.apache.hadoop.test.LambdaTestUtils.eval; @@ -124,11 +129,12 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { * For submitting work. */ private static final ListeningExecutorService EXECUTOR = - BlockingThreadPoolExecutorService.newInstance( - EXECUTOR_THREAD_COUNT, - EXECUTOR_THREAD_COUNT * 2, - 30, TimeUnit.SECONDS, - "test-operations"); + MoreExecutors.listeningDecorator( + BlockingThreadPoolExecutorService.newInstance( + EXECUTOR_THREAD_COUNT, + EXECUTOR_THREAD_COUNT * 2, + 30, TimeUnit.SECONDS, + "test-operations")); /** @@ -330,27 +336,37 @@ protected Configuration createConfiguration() { removeBucketOverrides(bucketName, conf, MAX_THREADS, MAXIMUM_CONNECTIONS, - S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY); + S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, + DIRECTORY_MARKER_POLICY, + BULK_DELETE_PAGE_SIZE); conf.setInt(MAX_THREADS, EXECUTOR_THREAD_COUNT); conf.setInt(MAXIMUM_CONNECTIONS, EXECUTOR_THREAD_COUNT * 2); // turn off prune delays, so as to stop scale tests creating // so much cruft that future CLI prune commands take forever conf.setInt(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, 0); + // use the keep policy to ensure that surplus markers exist + // to complicate failures + conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_KEEP); + // set the delete page size to its maximum to ensure that all + // entries are included in the same large delete, even on + // scale runs. This is needed for assertions on the result. + conf.setInt(BULK_DELETE_PAGE_SIZE, 1_000); return conf; } /** * Create a unique path, which includes method name, - * multidelete flag and a random UUID. + * multidelete flag and a timestamp. * @return a string to use for paths. * @throws IOException path creation failure. */ private Path uniquePath() throws IOException { + long now = System.currentTimeMillis(); return path( - String.format("%s-%s-%04d", + String.format("%s-%s-%06d.%03d", getMethodName(), multiDelete ? "multi" : "single", - System.currentTimeMillis() % 10000)); + now / 1000, now % 1000)); } /** @@ -476,8 +492,11 @@ public void testRenameDirFailsInDelete() throws Throwable { // create a set of files // this is done in parallel as it is 10x faster on a long-haul test run. - List createdFiles = createFiles(fs, readOnlyDir, dirDepth, fileCount, - dirCount); + List dirs = new ArrayList<>(dirCount); + List createdFiles = createDirsAndFiles(fs, readOnlyDir, dirDepth, + fileCount, dirCount, + new ArrayList<>(fileCount), + dirs); // are they all there? int expectedFileCount = createdFiles.size(); assertFileCount("files ready to rename", roleFS, @@ -494,26 +513,36 @@ public void testRenameDirFailsInDelete() throws Throwable { MultiObjectDeleteException.class, deniedException); final List undeleted = extractUndeletedPaths(mde, fs::keyToQualifiedPath); + + List expectedUndeletedFiles = new ArrayList<>(createdFiles); + if (getFileSystem().getDirectoryMarkerPolicy() + .keepDirectoryMarkers(readOnlyDir)) { + // directory markers are being retained, + // so will also be in the list of undeleted files + expectedUndeletedFiles.addAll(dirs); + } Assertions.assertThat(undeleted) .as("files which could not be deleted") - .hasSize(expectedFileCount) - .containsAll(createdFiles) - .containsExactlyInAnyOrderElementsOf(createdFiles); + .containsExactlyInAnyOrderElementsOf(expectedUndeletedFiles); } LOG.info("Result of renaming read-only files is as expected", deniedException); assertFileCount("files in the source directory", roleFS, readOnlyDir, expectedFileCount); // now lets look at the destination. 
- // even with S3Guard on, we expect the destination to match that of our + // even with S3Guard on, we expect the destination to match that of // the remote state. // the test will exist describe("Verify destination directory exists"); - FileStatus st = roleFS.getFileStatus(writableDir); - assertTrue("Not a directory: " + st, - st.isDirectory()); + assertIsDirectory(writableDir); assertFileCount("files in the dest directory", roleFS, writableDir, expectedFileCount); + // all directories in the source tree must still exist, + // which for S3Guard means no tombstone markers were added + LOG.info("Verifying all directories still exist"); + for (Path dir : dirs) { + assertIsDirectory(dir); + } } @Test @@ -610,9 +639,14 @@ public void testPartialDirDelete() throws Throwable { // the full FS S3AFileSystem fs = getFileSystem(); - - List readOnlyFiles = createFiles(fs, readOnlyDir, - dirDepth, fileCount, dirCount); + StoreContext storeContext = fs.createStoreContext(); + + List dirs = new ArrayList<>(dirCount); + List readOnlyFiles = createDirsAndFiles( + fs, readOnlyDir, dirDepth, + fileCount, dirCount, + new ArrayList<>(fileCount), + dirs); List deletableFiles = createFiles(fs, writableDir, dirDepth, fileCount, dirCount); @@ -624,20 +658,36 @@ public void testPartialDirDelete() throws Throwable { readOnlyFiles.stream(), deletableFiles.stream()) .collect(Collectors.toList()); + List keyPaths = allFiles.stream() + .map(path -> + new MultiObjectDeleteSupport.KeyPath( + storeContext.pathToKey(path), + path, + false)) + .collect(Collectors.toList()); // this set can be deleted by the role FS MetricDiff rejectionCount = new MetricDiff(roleFS, FILES_DELETE_REJECTED); - MetricDiff deleteVerbCount = new MetricDiff(roleFS, OBJECT_DELETE_REQUESTS); + MetricDiff deleteVerbCount = new MetricDiff(roleFS, OBJECT_DELETE_REQUEST); + MetricDiff bulkDeleteVerbCount = new MetricDiff(roleFS, + OBJECT_BULK_DELETE_REQUEST); + MetricDiff deleteObjectCount = new MetricDiff(roleFS, + OBJECT_DELETE_OBJECTS); describe("Trying to delete read only directory"); AccessDeniedException ex = expectDeleteForbidden(readOnlyDir); if (multiDelete) { // multi-delete status checks extractCause(MultiObjectDeleteException.class, ex); + deleteVerbCount.assertDiffEquals("Wrong delete request count", 0); + bulkDeleteVerbCount.assertDiffEquals("Wrong bulk delete request count", + 1); + deleteObjectCount.assertDiffEquals("Number of keys in delete request", + readOnlyFiles.size()); rejectionCount.assertDiffEquals("Wrong rejection count", readOnlyFiles.size()); - deleteVerbCount.assertDiffEquals("Wrong delete count", 1); - reset(rejectionCount, deleteVerbCount); + reset(rejectionCount, deleteVerbCount, deleteObjectCount, + bulkDeleteVerbCount); } // all the files are still there? 
(avoid in scale test due to cost) if (!scaleTest) { @@ -646,18 +696,26 @@ public void testPartialDirDelete() throws Throwable { describe("Trying to delete upper-level directory"); ex = expectDeleteForbidden(basePath); + String iostats = ioStatisticsSourceToString(roleFS); + if (multiDelete) { // multi-delete status checks - extractCause(MultiObjectDeleteException.class, ex); - deleteVerbCount.assertDiffEquals("Wrong delete count", 1); + deleteVerbCount.assertDiffEquals("Wrong delete request count", 0); + bulkDeleteVerbCount.assertDiffEquals( + "Wrong count of delete operations in " + iostats, 1); MultiObjectDeleteException mde = extractCause( MultiObjectDeleteException.class, ex); - final List undeleted - = removeUndeletedPaths(mde, allFiles, fs::keyToQualifiedPath); + List undeletedKeyPaths = + removeUndeletedPaths(mde, keyPaths, storeContext::keyToPath); + final List undeleted = toPathList( + undeletedKeyPaths); + deleteObjectCount.assertDiffEquals( + "Wrong count of objects in delete request", + allFiles.size()); Assertions.assertThat(undeleted) .as("files which could not be deleted") .containsExactlyInAnyOrderElementsOf(readOnlyFiles); - Assertions.assertThat(allFiles) + Assertions.assertThat(toPathList(keyPaths)) .as("files which were deleted") .containsExactlyInAnyOrderElementsOf(deletableFiles); rejectionCount.assertDiffEquals("Wrong rejection count", @@ -676,7 +734,26 @@ public void testPartialDirDelete() throws Throwable { Assertions.assertThat(readOnlyListing) .as("ReadOnly directory " + directoryList) - .containsAll(readOnlyFiles); + .containsExactlyInAnyOrderElementsOf(readOnlyFiles); + } + + /** + * Verifies the logic of handling directory markers in + * delete operations, specifically: + *
<ol>
+ *   <li>all markers above empty directories MUST be deleted</li>
+ *   <li>all markers above non-empty directories MUST NOT be deleted</li>
+ * </ol>
+ * As the delete list may include subdirectories, we need to work up from
+ * the bottom of the list of deleted files before probing the parents,
+ * that being done by a s3guard get(path, need-empty-directory) call.
+ * <p></p>
    + * This is pretty sensitive code. + */ + @Test + public void testSubdirDeleteFailures() throws Throwable { + describe("Multiobject delete handling of directorYesFory markers"); + assume("Multiobject delete only", multiDelete); } /** @@ -770,7 +847,7 @@ private static CompletableFuture put(FileSystem fs, } /** - * Parallel-touch a set of files in the destination directory. + * Build a set of files in a directory tree. * @param fs filesystem * @param destDir destination * @param depth file depth @@ -783,12 +860,48 @@ public static List createFiles(final FileSystem fs, final int depth, final int fileCount, final int dirCount) throws IOException { - List> futures = new ArrayList<>(fileCount); - List paths = new ArrayList<>(fileCount); - List dirs = new ArrayList<>(fileCount); + return createDirsAndFiles(fs, destDir, depth, fileCount, dirCount, + new ArrayList(fileCount), + new ArrayList(dirCount)); + } + + /** + * Build a set of files in a directory tree. + * @param fs filesystem + * @param destDir destination + * @param depth file depth + * @param fileCount number of files to create. + * @param dirCount number of dirs to create at each level + * @param paths [out] list of file paths created + * @param dirs [out] list of directory paths created. + * @return the list of files created. + */ + public static List createDirsAndFiles(final FileSystem fs, + final Path destDir, + final int depth, + final int fileCount, + final int dirCount, + final List paths, + final List dirs) throws IOException { buildPaths(paths, dirs, destDir, depth, fileCount, dirCount); + List> futures = new ArrayList<>(paths.size() + + dirs.size()); + + // create directories. With dir marker retention, that adds more entries + // to cause deletion issues + try (DurationInfo ignore = + new DurationInfo(LOG, "Creating %d directories", dirs.size())) { + for (Path path : dirs) { + futures.add(submit(EXECUTOR, () ->{ + fs.mkdirs(path); + return path; + })); + } + waitForCompletion(futures); + } + try (DurationInfo ignore = - new DurationInfo(LOG, "Creating %d files", fileCount)) { + new DurationInfo(LOG, "Creating %d files", paths.size())) { for (Path path : paths) { futures.add(put(fs, path, path.getName())); } @@ -839,4 +952,48 @@ private static void buildPaths( public static String filenameOfIndex(final int i) { return String.format("%s%03d", PREFIX, i); } + + /** + * Verifies that s3:DeleteObjectVersion is not required for rename. + *

+ * See HADOOP-17621.
+ * <p></p>
    + * This test will only show a regression if the bucket has versioning + * enabled *and* S3Guard is enabled. + */ + @Test + public void testRenamePermissionRequirements() throws Throwable { + describe("Verify rename() only needs s3:DeleteObject permission"); + // close the existing roleFS + IOUtils.cleanupWithLogger(LOG, roleFS); + + // create an assumed role config which doesn't have + // s3:DeleteObjectVersion permission, and attempt rename + // and then delete. + Configuration roleConfig = createAssumedRoleConfig(); + bindRolePolicyStatements(roleConfig, + STATEMENT_S3GUARD_CLIENT, + STATEMENT_ALLOW_SSE_KMS_RW, + STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x + new Statement(Effects.Allow) // dest: rwx + .addActions(S3_PATH_RW_OPERATIONS) + .addResources(directory(basePath)), + new Statement(Effects.Deny) + .addActions(S3_DELETE_OBJECT_VERSION) + .addResources(directory(basePath))); + roleFS = (S3AFileSystem) basePath.getFileSystem(roleConfig); + + Path srcDir = new Path(basePath, "src"); + Path destDir = new Path(basePath, "dest"); + roleFS.mkdirs(srcDir); + + // the role FS has everything but that deleteObjectVersion permission, so + // MUST be able to create files + List createdFiles = createFiles(roleFS, srcDir, dirDepth, fileCount, + dirCount); + roleFS.rename(srcDir, destDir); + roleFS.rename(destDir, srcDir); + roleFS.delete(srcDir, true); + + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java new file mode 100644 index 0000000000000..9885eb5698477 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java @@ -0,0 +1,248 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +import com.amazonaws.AmazonClientException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.util.BlockingThreadPoolExecutorService; + +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableS3GuardInTestBucket; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit; +import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion; +import static org.apache.hadoop.io.IOUtils.cleanupWithLogger; + +/** + * HADOOP-16721: race condition with delete and rename underneath the + * same destination directory. + * This test suite recreates the failure using semaphores to + * guarantee the failure condition is encountered + * -then verifies that the rename operation is successful. + */ +public class ITestRenameDeleteRace extends AbstractS3ATestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestRenameDeleteRace.class); + + + /** Many threads for scale performance: {@value}. */ + public static final int EXECUTOR_THREAD_COUNT = 2; + + /** + * For submitting work. + */ + private static final BlockingThreadPoolExecutorService EXECUTOR = + BlockingThreadPoolExecutorService.newInstance( + EXECUTOR_THREAD_COUNT, + EXECUTOR_THREAD_COUNT * 2, + 30, TimeUnit.SECONDS, + "test-operations"); + + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + + // use the keep policy to ensure that surplus markers exist + // to complicate failures + conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE); + removeBaseAndBucketOverrides(getTestBucketName(conf), + conf, + DIRECTORY_MARKER_POLICY); + disableS3GuardInTestBucket(conf); + return conf; + } + + /** + * This test uses a subclass of S3AFileSystem to recreate the race between + * subdirectory delete and rename. + * The JUnit thread performs the rename, while an executor-submitted + * thread performs the delete. + * Semaphores are used to + * -block the JUnit thread from initiating the rename until the delete + * has finished the delete phase, and has reached the + * {@code maybeCreateFakeParentDirectory()} call. + * A second semaphore is used to block the delete thread from + * listing and recreating the deleted directory until after + * the JUnit thread has completed. + * Together, the two semaphores guarantee that the rename() + * call will be made at exactly the moment when the destination + * directory no longer exists. + */ + @Test + public void testDeleteRenameRaceCondition() throws Throwable { + describe("verify no race between delete and rename"); + + // the normal FS is used for path setup, verification + // and the rename call. 
+ final S3AFileSystem fs = getFileSystem(); + final Path path = path(getMethodName()); + Path srcDir = new Path(path, "src"); + + // this dir must exist throughout the rename + Path destDir = new Path(path, "dest"); + // this dir tree will be deleted in a thread which does not + // complete before the rename exists + Path destSubdir1 = new Path(destDir, "subdir1"); + Path subfile1 = new Path(destSubdir1, "subfile1"); + + // this is the directory we want to copy over under the dest dir + Path srcSubdir2 = new Path(srcDir, "subdir2"); + Path srcSubfile = new Path(srcSubdir2, "subfile2"); + Path destSubdir2 = new Path(destDir, "subdir2"); + + // creates subfile1 and all parents, so that + // dest/subdir1/subfile1 exists as a file; + // dest/subdir1 and dest are directories without markers + ContractTestUtils.touch(fs, subfile1); + assertIsDirectory(destDir); + + // source subfile + ContractTestUtils.touch(fs, srcSubfile); + + // this is the FS used for delete() + final BlockingFakeDirMarkerFS blockingFS + = new BlockingFakeDirMarkerFS(); + blockingFS.initialize(fs.getUri(), fs.getConf()); + // get the semaphore; this ensures that the next attempt to create + // a fake marker blocks + blockingFS.blockFakeDirCreation(); + try { + final CompletableFuture future = submit(EXECUTOR, () -> { + LOG.info("deleting {}", destSubdir1); + blockingFS.delete(destSubdir1, true); + return destSubdir1; + }); + + // wait for the blocking FS to return from the DELETE call. + blockingFS.awaitFakeDirCreation(); + + try { + // there is now no destination directory + assertPathDoesNotExist("should have been implicitly deleted", + destDir); + + // attempt the rename in the normal FS. + LOG.info("renaming {} to {}", srcSubdir2, destSubdir2); + Assertions.assertThat(fs.rename(srcSubdir2, destSubdir2)) + .describedAs("rename(%s, %s)", srcSubdir2, destSubdir2) + .isTrue(); + // dest dir implicitly exists. + assertPathExists("must now exist", destDir); + } finally { + // release the remaining semaphore so that the deletion thread exits. + blockingFS.allowFakeDirCreationToProceed(); + } + + // now let the delete complete + LOG.info("Waiting for delete {} to finish", destSubdir1); + waitForCompletion(future); + + // everything still exists + assertPathExists("must now exist", destDir); + assertPathExists("must now exist", new Path(destSubdir2, "subfile2")); + assertPathDoesNotExist("Src dir deleted", srcSubdir2); + + } finally { + cleanupWithLogger(LOG, blockingFS); + } + + } + + /** + * Subclass of S3A FS whose execution of maybeCreateFakeParentDirectory + * can be choreographed with another thread so as to reliably + * create the delete/rename race condition. + * This class is only intended for "single shot" API calls. + */ + private final class BlockingFakeDirMarkerFS extends S3AFileSystem { + + /** + * Block for entry into maybeCreateFakeParentDirectory(); will be released + * then. + */ + private final Semaphore signalCreatingFakeParentDirectory = + new Semaphore(1); + + /** + * Semaphore to acquire before the marker can be listed/created. 
+ */ + private final Semaphore blockBeforeCreatingMarker = new Semaphore(1); + + private BlockingFakeDirMarkerFS() { + signalCreatingFakeParentDirectory.acquireUninterruptibly(); + } + + @Override + protected void maybeCreateFakeParentDirectory(final Path path) + throws IOException, AmazonClientException { + LOG.info("waking anything blocked on the signal semaphore"); + // notify anything waiting + signalCreatingFakeParentDirectory.release(); + // acquire the semaphore and then create any fake directory + LOG.info("blocking for creation"); + blockBeforeCreatingMarker.acquireUninterruptibly(); + try { + LOG.info("probing for/creating markers"); + super.maybeCreateFakeParentDirectory(path); + } finally { + // and release the marker for completeness. + blockBeforeCreatingMarker.release(); + } + } + + /** + * Block until fake dir creation is invoked. + */ + public void blockFakeDirCreation() throws InterruptedException { + blockBeforeCreatingMarker.acquire(); + } + + /** + * wait for the blocking FS to return from the DELETE call. + */ + public void awaitFakeDirCreation() throws InterruptedException { + LOG.info("Blocking until maybeCreateFakeParentDirectory() is reached"); + signalCreatingFakeParentDirectory.acquire(); + } + + public void allowFakeDirCreationToProceed() { + LOG.info("Allowing the fake directory LIST/PUT to proceed."); + blockBeforeCreatingMarker.release(); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java new file mode 100644 index 0000000000000..aa3cecaf1eb7a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.FileNotFoundException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import org.assertj.core.api.AbstractStringAssert; +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest; + +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_OP_XATTR_LIST; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_XATTR_GET_MAP; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_XATTR_GET_NAMED; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.CONTENT_TYPE_OCTET_STREAM; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.CONTENT_TYPE_APPLICATION_XML; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_CONTENT_LENGTH; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_CONTENT_TYPE; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STANDARD_HEADERS; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.CREATE_FILE_OVERWRITE; + +/** + * Invoke XAttr API calls against objects in S3 and validate header + * extraction. + */ +public class ITestXAttrCost extends AbstractS3ACostTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestXAttrCost.class); + + private static final int GET_METADATA_ON_OBJECT = 1; + private static final int GET_METADATA_ON_DIR = GET_METADATA_ON_OBJECT * 2; + + public ITestXAttrCost() { + // no parameterization here + super(false, true, false); + } + + @Test + public void testXAttrRoot() throws Throwable { + describe("Test xattr on root"); + Path root = new Path("/"); + S3AFileSystem fs = getFileSystem(); + Map xAttrs = verifyMetrics( + () -> fs.getXAttrs(root), + with(INVOCATION_XATTR_GET_MAP, GET_METADATA_ON_OBJECT)); + logXAttrs(xAttrs); + List headerList = verifyMetrics(() -> + fs.listXAttrs(root), + with(INVOCATION_OP_XATTR_LIST, GET_METADATA_ON_OBJECT)); + + // verify this contains all the standard markers, + // but not the magic marker header + Assertions.assertThat(headerList) + .describedAs("Headers on root object") + .containsOnly( + XA_CONTENT_LENGTH, + XA_CONTENT_TYPE); + assertHeaderEntry(xAttrs, XA_CONTENT_TYPE) + .isEqualTo(CONTENT_TYPE_APPLICATION_XML); + } + + /** + * Log the attributes as strings. 
+ * @param xAttrs map of attributes + */ + private void logXAttrs(final Map xAttrs) { + xAttrs.forEach((k, v) -> + LOG.info("{} has bytes[{}] => \"{}\"", + k, v.length, decodeBytes(v))); + } + + @Test + public void testXAttrFile() throws Throwable { + describe("Test xattr on a file"); + Path testFile = methodPath(); + create(testFile, true, CREATE_FILE_OVERWRITE); + S3AFileSystem fs = getFileSystem(); + Map xAttrs = verifyMetrics(() -> + fs.getXAttrs(testFile), + with(INVOCATION_XATTR_GET_MAP, GET_METADATA_ON_OBJECT)); + logXAttrs(xAttrs); + assertHeaderEntry(xAttrs, XA_CONTENT_LENGTH) + .isEqualTo("0"); + + // get the list of supported headers + List headerList = verifyMetrics( + () -> fs.listXAttrs(testFile), + with(INVOCATION_OP_XATTR_LIST, GET_METADATA_ON_OBJECT)); + // verify this contains all the standard markers, + // but not the magic marker header + Assertions.assertThat(headerList) + .describedAs("Supported headers") + .containsAnyElementsOf(Arrays.asList(XA_STANDARD_HEADERS)); + + // ask for one header and validate its value + byte[] bytes = verifyMetrics(() -> + fs.getXAttr(testFile, XA_CONTENT_LENGTH), + with(INVOCATION_XATTR_GET_NAMED, GET_METADATA_ON_OBJECT)); + assertHeader(XA_CONTENT_LENGTH, bytes) + .isEqualTo("0"); + assertHeaderEntry(xAttrs, XA_CONTENT_TYPE) + .isEqualTo(CONTENT_TYPE_OCTET_STREAM); + } + + /** + * Directory attributes can be retrieved, but they take two HEAD requests. + * @throws Throwable + */ + @Test + public void testXAttrDir() throws Throwable { + describe("Test xattr on a dir"); + + S3AFileSystem fs = getFileSystem(); + Path dir = methodPath(); + fs.mkdirs(dir); + Map xAttrs = verifyMetrics(() -> + fs.getXAttrs(dir), + with(INVOCATION_XATTR_GET_MAP, GET_METADATA_ON_DIR)); + logXAttrs(xAttrs); + assertHeaderEntry(xAttrs, XA_CONTENT_LENGTH) + .isEqualTo("0"); + + // get the list of supported headers + List headerList = verifyMetrics( + () -> fs.listXAttrs(dir), + with(INVOCATION_OP_XATTR_LIST, GET_METADATA_ON_DIR)); + // verify this contains all the standard markers, + // but not the magic marker header + Assertions.assertThat(headerList) + .describedAs("Supported headers") + .containsAnyElementsOf(Arrays.asList(XA_STANDARD_HEADERS)); + + // ask for one header and validate its value + byte[] bytes = verifyMetrics(() -> + fs.getXAttr(dir, XA_CONTENT_LENGTH), + with(INVOCATION_XATTR_GET_NAMED, GET_METADATA_ON_DIR)); + assertHeader(XA_CONTENT_LENGTH, bytes) + .isEqualTo("0"); + assertHeaderEntry(xAttrs, XA_CONTENT_TYPE) + .isEqualTo(CONTENT_TYPE_OCTET_STREAM); + } + + /** + * When the operations are called on a missing path, FNFE is + * raised and only one attempt is made to retry the operation. + */ + @Test + public void testXAttrMissingFile() throws Throwable { + describe("Test xattr on a missing path"); + Path testFile = methodPath(); + S3AFileSystem fs = getFileSystem(); + int getMetadataOnMissingFile = GET_METADATA_ON_DIR; + verifyMetricsIntercepting(FileNotFoundException.class, "", () -> + fs.getXAttrs(testFile), + with(INVOCATION_XATTR_GET_MAP, getMetadataOnMissingFile)); + verifyMetricsIntercepting(FileNotFoundException.class, "", () -> + fs.getXAttr(testFile, XA_CONTENT_LENGTH), + with(INVOCATION_XATTR_GET_NAMED, getMetadataOnMissingFile)); + verifyMetricsIntercepting(FileNotFoundException.class, "", () -> + fs.listXAttrs(testFile), + with(INVOCATION_OP_XATTR_LIST, getMetadataOnMissingFile)); + } + + /** + * Generate an assert on a named header in the map. 
+ * @param xAttrs attribute map + * @param key header key + * @return the assertion + */ + private AbstractStringAssert assertHeaderEntry( + Map xAttrs, String key) { + + return assertHeader(key, xAttrs.get(key)); + } + + /** + * Create an assertion on the header; check for the bytes + * being non-null/empty and then returns the decoded values + * as a string assert. + * @param key header key (for error) + * @param bytes value + * @return the assertion + */ + private AbstractStringAssert assertHeader(final String key, + final byte[] bytes) { + + String decoded = decodeBytes(bytes); + return Assertions.assertThat(decoded) + .describedAs("xattr %s decoded to: %s", key, decoded) + .isNotNull() + .isNotEmpty(); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestDirectoryMarkerPolicy.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestDirectoryMarkerPolicy.java new file mode 100644 index 0000000000000..194cd645c0714 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestDirectoryMarkerPolicy.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.util.Arrays; +import java.util.Collection; +import java.util.function.Predicate; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP; + +/** + * Unit tests for directory marker policies. 
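+ * Each parameterized case supplies a marker policy, an authoritativeness
+ * predicate and the expected marker retention for non-auth and auth paths.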
+ */ +@RunWith(Parameterized.class) +public class TestDirectoryMarkerPolicy extends AbstractHadoopTestBase { + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + return Arrays.asList(new Object[][]{ + { + DirectoryPolicy.MarkerPolicy.Delete, + FAIL_IF_INVOKED, + false, false + }, + { + DirectoryPolicy.MarkerPolicy.Keep, + FAIL_IF_INVOKED, + true, true + }, + { + DirectoryPolicy.MarkerPolicy.Authoritative, + AUTH_PATH_ONLY, + false, true + } + }); + } + + private final DirectoryPolicy directoryPolicy; + + private final boolean expectNonAuthDelete; + + private final boolean expectAuthDelete; + + public TestDirectoryMarkerPolicy( + final DirectoryPolicy.MarkerPolicy markerPolicy, + final Predicate authoritativeness, + final boolean expectNonAuthDelete, + final boolean expectAuthDelete) { + this.directoryPolicy = newPolicy(markerPolicy, authoritativeness); + this.expectNonAuthDelete = expectNonAuthDelete; + this.expectAuthDelete = expectAuthDelete; + } + + /** + * Create a new retention policy. + * @param markerPolicy policy option + * @param authoritativeness predicate for determining if + * a path is authoritative. + * @return the retention policy. + */ + private DirectoryPolicy newPolicy( + DirectoryPolicy.MarkerPolicy markerPolicy, + Predicate authoritativeness) { + return new DirectoryPolicyImpl(markerPolicy, authoritativeness); + } + + private static final Predicate AUTH_PATH_ONLY = + (p) -> p.toUri().getPath().startsWith("/auth/"); + + private static final Predicate FAIL_IF_INVOKED = (p) -> { + throw new RuntimeException("failed"); + }; + + private final Path nonAuthPath = new Path("s3a://bucket/nonauth/data"); + + private final Path authPath = new Path("s3a://bucket/auth/data1"); + + private final Path deepAuth = new Path("s3a://bucket/auth/d1/d2/data2"); + + /** + * Assert that a path has a retention outcome. + * @param path path + * @param retain should the marker be retained + */ + private void assertMarkerRetention(Path path, boolean retain) { + Assertions.assertThat(directoryPolicy.keepDirectoryMarkers(path)) + .describedAs("Retention of path %s by %s", path, directoryPolicy) + .isEqualTo(retain); + } + + /** + * Assert that a path has a capability. 
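+ * @param path path to probe
+ * @param capability capability to check
+ * @param outcome expected outcome of the probe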
+ */ + private void assertPathCapability(Path path, + String capability, + boolean outcome) { + Assertions.assertThat(directoryPolicy) + .describedAs("%s support for capability %s by path %s" + + " expected as %s", + directoryPolicy, capability, path, outcome) + .matches(p -> p.hasPathCapability(path, capability) == outcome, + "pathCapability"); + } + + @Test + public void testNonAuthPath() throws Throwable { + assertMarkerRetention(nonAuthPath, expectNonAuthDelete); + assertPathCapability(nonAuthPath, + STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE, + !expectNonAuthDelete); + assertPathCapability(nonAuthPath, + STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP, + expectNonAuthDelete); + } + + @Test + public void testAuthPath() throws Throwable { + assertMarkerRetention(authPath, expectAuthDelete); + assertPathCapability(authPath, + STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE, + !expectAuthDelete); + assertPathCapability(authPath, + STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP, + expectAuthDelete); + } + + @Test + public void testDeepAuthPath() throws Throwable { + assertMarkerRetention(deepAuth, expectAuthDelete); + assertPathCapability(deepAuth, + STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE, + !expectAuthDelete); + assertPathCapability(deepAuth, + STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP, + expectAuthDelete); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java new file mode 100644 index 0000000000000..e0c6feeb256cc --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java @@ -0,0 +1,313 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.impl; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.amazonaws.services.s3.model.ObjectMetadata; +import org.assertj.core.api.Assertions; +import org.assertj.core.util.Lists; +import org.junit.Before; +import org.junit.Test; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.test.OperationTrackingStore; +import org.apache.hadoop.test.HadoopTestBase; + +import static java.lang.System.currentTimeMillis; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.XA_MAGIC_MARKER; +import static org.apache.hadoop.fs.s3a.commit.CommitConstants.X_HEADER_MAGIC_MARKER; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_LAST_MODIFIED; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_CONTENT_LENGTH; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.encodeBytes; +import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.extractXAttrLongValue; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Unit tests of header processing logic in {@link HeaderProcessing}. + * Builds up a context accessor where the path + * defined in {@link #MAGIC_PATH} exists and returns object metadata. + * + */ +public class TestHeaderProcessing extends HadoopTestBase { + + private static final XAttrContextAccessor CONTEXT_ACCESSORS + = new XAttrContextAccessor(); + + public static final String VALUE = "abcdeFGHIJ123!@##&82;"; + + public static final long FILE_LENGTH = 1024; + + private static final String FINAL_FILE = "s3a://bucket/dest/output.csv"; + + private StoreContext context; + + private HeaderProcessing headerProcessing; + + private static final String MAGIC_KEY + = "dest/__magic/job1/ta1/__base/output.csv"; + private static final String MAGIC_FILE + = "s3a://bucket/" + MAGIC_KEY; + + private static final Path MAGIC_PATH = + new Path(MAGIC_FILE); + + public static final long MAGIC_LEN = 4096L; + + /** + * All the XAttrs which are built up. + */ + private static final String[] RETRIEVED_XATTRS = { + XA_MAGIC_MARKER, + XA_CONTENT_LENGTH, + XA_LAST_MODIFIED + }; + + @Before + public void setup() throws Exception { + CONTEXT_ACCESSORS.len = FILE_LENGTH; + CONTEXT_ACCESSORS.userHeaders.put( + X_HEADER_MAGIC_MARKER, + Long.toString(MAGIC_LEN)); + context = S3ATestUtils.createMockStoreContext(true, + new OperationTrackingStore(), CONTEXT_ACCESSORS); + headerProcessing = new HeaderProcessing(context); + } + + @Test + public void testByteRoundTrip() throws Throwable { + Assertions.assertThat(decodeBytes(encodeBytes(VALUE))) + .describedAs("encoding of " + VALUE) + .isEqualTo(VALUE); + } + + @Test + public void testGetMarkerXAttr() throws Throwable { + assertAttributeHasValue(XA_MAGIC_MARKER, MAGIC_LEN); + } + + @Test + public void testGetLengthXAttr() throws Throwable { + assertAttributeHasValue(XA_CONTENT_LENGTH, FILE_LENGTH); + } + + /** + * Last modified makes it through. + */ + @Test + public void testGetDateXAttr() throws Throwable { + Assertions.assertThat( + decodeBytes(headerProcessing.getXAttr(MAGIC_PATH, + XA_LAST_MODIFIED))) + .describedAs("XAttribute " + XA_LAST_MODIFIED) + .isEqualTo(CONTEXT_ACCESSORS.date.toString()); + } + + /** + * The API calls on unknown paths raise 404s. 
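+ * These surface as {@link FileNotFoundException}.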
+ */ + @Test + public void test404() throws Throwable { + intercept(FileNotFoundException.class, () -> + headerProcessing.getXAttr(new Path(FINAL_FILE), XA_MAGIC_MARKER)); + } + + /** + * This call returns all the attributes which aren't null, including + * all the standard HTTP headers. + */ + @Test + public void testGetAllXAttrs() throws Throwable { + Map xAttrs = headerProcessing.getXAttrs(MAGIC_PATH); + Assertions.assertThat(xAttrs.keySet()) + .describedAs("Attribute keys") + .contains(RETRIEVED_XATTRS); + } + + /** + * This call returns all the attributes which aren't null, including + * all the standard HTTP headers. + */ + @Test + public void testListXAttrKeys() throws Throwable { + List xAttrs = headerProcessing.listXAttrs(MAGIC_PATH); + Assertions.assertThat(xAttrs) + .describedAs("Attribute keys") + .contains(RETRIEVED_XATTRS); + } + + /** + * Filtering is on attribute key, not header. + */ + @Test + public void testGetFilteredXAttrs() throws Throwable { + Map xAttrs = headerProcessing.getXAttrs(MAGIC_PATH, + Lists.list(XA_MAGIC_MARKER, XA_CONTENT_LENGTH, "unknown")); + Assertions.assertThat(xAttrs.keySet()) + .describedAs("Attribute keys") + .containsExactlyInAnyOrder(XA_MAGIC_MARKER, XA_CONTENT_LENGTH); + // and the values are good + assertLongAttributeValue( + XA_MAGIC_MARKER, + xAttrs.get(XA_MAGIC_MARKER), + MAGIC_LEN); + assertLongAttributeValue( + XA_CONTENT_LENGTH, + xAttrs.get(XA_CONTENT_LENGTH), + FILE_LENGTH); + } + + /** + * An empty list of keys results in empty results. + */ + @Test + public void testFilterEmptyXAttrs() throws Throwable { + Map xAttrs = headerProcessing.getXAttrs(MAGIC_PATH, + Lists.list()); + Assertions.assertThat(xAttrs.keySet()) + .describedAs("Attribute keys") + .isEmpty(); + } + + /** + * Add two headers to the metadata, then verify that + * the magic marker header is copied, but not the other header. + */ + @Test + public void testMetadataCopySkipsMagicAttribute() throws Throwable { + + final String owner = "x-header-owner"; + final String root = "root"; + CONTEXT_ACCESSORS.userHeaders.put(owner, root); + final ObjectMetadata source = context.getContextAccessors() + .getObjectMetadata(MAGIC_KEY); + final Map sourceUserMD = source.getUserMetadata(); + Assertions.assertThat(sourceUserMD.get(owner)) + .describedAs("owner header in copied MD") + .isEqualTo(root); + + ObjectMetadata dest = new ObjectMetadata(); + headerProcessing.cloneObjectMetadata(source, dest); + + Assertions.assertThat(dest.getUserMetadata().get(X_HEADER_MAGIC_MARKER)) + .describedAs("Magic marker header in copied MD") + .isNull(); + Assertions.assertThat(dest.getUserMetadata().get(owner)) + .describedAs("owner header in copied MD") + .isEqualTo(root); + } + + /** + * Assert that an XAttr has a specific long value. + * @param key attribute key + * @param bytes bytes of the attribute. + * @param expected expected numeric value. + */ + private void assertLongAttributeValue( + final String key, + final byte[] bytes, + final long expected) { + Assertions.assertThat(extractXAttrLongValue(bytes)) + .describedAs("XAttribute " + key) + .isNotEmpty() + .hasValue(expected); + } + + /** + * Assert that a retrieved XAttr has a specific long value. + * @param key attribute key + * @param expected expected numeric value. 
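+ * @throws IOException if the attribute cannot be retrieved.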
+ */
+  protected void assertAttributeHasValue(final String key,
+      final long expected)
+      throws IOException {
+    assertLongAttributeValue(
+        key,
+        headerProcessing.getXAttr(MAGIC_PATH, key),
+        expected);
+  }
+
+  /**
+   * Context accessor with XAttrs returned for the {@link #MAGIC_PATH}
+   * path.
+   */
+  private static final class XAttrContextAccessor
+      implements ContextAccessors {
+
+    private final Map userHeaders = new HashMap<>();
+
+    private long len;
+    private Date date = new Date(currentTimeMillis());
+
+    @Override
+    public Path keyToPath(final String key) {
+      return new Path("s3a://bucket/" + key);
+    }
+
+    @Override
+    public String pathToKey(final Path path) {
+      // key is path with leading / stripped.
+      String key = path.toUri().getPath();
+      return key.length() > 1 ? key.substring(1) : key;
+    }
+
+    @Override
+    public File createTempFile(final String prefix, final long size)
+        throws IOException {
+      throw new UnsupportedOperationException("unsupported");
+    }
+
+    @Override
+    public String getBucketLocation() throws IOException {
+      return null;
+    }
+
+    @Override
+    public Path makeQualified(final Path path) {
+      return path;
+    }
+
+    @Override
+    public ObjectMetadata getObjectMetadata(final String key)
+        throws IOException {
+      if (MAGIC_KEY.equals(key)) {
+        ObjectMetadata omd = new ObjectMetadata();
+        omd.setUserMetadata(userHeaders);
+        omd.setContentLength(len);
+        omd.setLastModified(date);
+        return omd;
+      } else {
+        throw new FileNotFoundException(key);
+      }
+    }
+
+    public void setHeader(String key, String val) {
+      userHeaders.put(key, val);
+    }
+  }
+
+}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java
new file mode 100644
index 0000000000000..10fe339174fc5
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNetworkBinding.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.impl;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.client.builder.AwsClientBuilder;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.hadoop.test.AbstractHadoopTestBase;
+
+import static org.apache.hadoop.fs.s3a.DefaultS3ClientFactory.createEndpointConfiguration;
+import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Unit tests related to the {@link NetworkBinding} class.
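+ * Covers bucket region fixup and endpoint configuration creation.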
+ */ +public class TestNetworkBinding extends AbstractHadoopTestBase { + + private static final String US_EAST_1 = "us-east-1"; + + private static final String US_WEST_2 = "us-west-2"; + + @Test + public void testUSEast() { + assertRegionFixup(US_EAST_1, US_EAST_1); + } + + @Test + public void testUSWest() { + assertRegionFixup(US_WEST_2, US_WEST_2); + } + + @Test + public void testRegionUStoUSEast() { + assertRegionFixup("US", US_EAST_1); + } + + @Test + public void testRegionNullToUSEast() { + assertRegionFixup(null, US_EAST_1); + } + + private static void assertRegionFixup(String region, String expected) { + assertThat(fixBucketRegion(region)) + .describedAs("Fixup of %s", region) + .isEqualTo(expected); + } + + @Test + public void testNull() throws Throwable { + expectEndpoint("", true, "unused"); + } + + @Test + @Ignore("disabled until endpoint logic works for S3 client builder API") + public void testUSEastEndpoint() throws Throwable { + expectEndpoint(US_EAST_1, false, US_EAST_1); + } + + @Test + @Ignore("disabled until endpoint logic works for S3 client builder API") + public void testUSWestEndpoint() throws Throwable { + expectEndpoint(US_WEST_2, false, US_WEST_2); + } + + public void expectEndpoint(final String src, + final boolean expectNull, + final String expectRegion) { + AwsClientBuilder.EndpointConfiguration epr = + createEndpointConfiguration(src, new ClientConfiguration()); + String eprStr = epr == null + ? "(empty)" + : ("(" + epr.getServiceEndpoint() + " " + epr.getSigningRegion()); + if (expectNull) { + assertThat(epr) + .describedAs("Endpoint configuration of %s =", + src, eprStr) + .isNull(); + } else { + assertThat(epr) + .describedAs("Endpoint configuration of %s =", + src, eprStr) + .hasFieldOrPropertyWithValue("serviceEndpoint", src) + .hasFieldOrPropertyWithValue("signingRegion", expectRegion); + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNeworkBinding.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNeworkBinding.java deleted file mode 100644 index eebc3bfdf2fe4..0000000000000 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestNeworkBinding.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.s3a.impl; - -import org.assertj.core.api.Assertions; -import org.junit.Test; - -import org.apache.hadoop.test.HadoopTestBase; - -import static org.apache.hadoop.fs.s3a.impl.NetworkBinding.fixBucketRegion; - -/** - * Unit tests related to the {@link NetworkBinding} class. 
- */ -public class TestNeworkBinding extends HadoopTestBase { - - private static final String US_EAST_1 = "us-east-1"; - private static final String US_WEST_2 = "us-west-2"; - - @Test - public void testUSEast() { - assertRegionFixup(US_EAST_1, US_EAST_1); - } - - @Test - public void testUSWest() { - assertRegionFixup(US_WEST_2, US_WEST_2); - } - - @Test - public void testRegionUStoUSEast() { - assertRegionFixup("US", US_EAST_1); - } - - @Test - public void testRegionNullToUSEast() { - assertRegionFixup(null, US_EAST_1); - } - - private static void assertRegionFixup(String region, String expected) { - Assertions.assertThat(fixBucketRegion(region)) - .describedAs("Fixup of %s", region) - .isEqualTo(expected); - } -} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java index 244d2eed324c7..42714cb1555e4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestPartialDeleteFailures.java @@ -18,51 +18,31 @@ package org.apache.hadoop.fs.s3a.impl; -import javax.annotation.Nullable; import java.io.File; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.MultiObjectDeleteException; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import com.amazonaws.services.s3.model.ObjectMetadata; import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; import org.apache.commons.lang3.tuple.Pair; import org.apache.commons.lang3.tuple.Triple; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.s3a.Constants; -import org.apache.hadoop.fs.s3a.Invoker; -import org.apache.hadoop.fs.s3a.S3AFileStatus; -import org.apache.hadoop.fs.s3a.S3AInputPolicy; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; -import org.apache.hadoop.fs.s3a.S3AStorageStatistics; -import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; -import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; -import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; -import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; -import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; -import org.apache.hadoop.fs.s3a.s3guard.RenameTracker; -import org.apache.hadoop.fs.s3a.s3guard.S3Guard; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.BlockingThreadPoolExecutorService; +import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.test.OperationTrackingStore; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.ACCESS_DENIED; import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.removeUndeletedPaths; +import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.toPathList; import static org.junit.Assert.assertEquals; /** @@ -80,36 +60,42 @@ private static Path qualifyKey(String 
k) { return new Path("s3a://bucket/" + k); } + private static String toKey(Path path) { + return path.toUri().getPath(); + } + @Before public void setUp() throws Exception { - context = createMockStoreContext(true, - new OperationTrackingStore()); + context = S3ATestUtils.createMockStoreContext(true, + new OperationTrackingStore(), CONTEXT_ACCESSORS); } @Test public void testDeleteExtraction() { - List src = pathList("a", "a/b", "a/c"); - List rejected = pathList("a/b"); + List src = pathList("a", "a/b", "a/c"); + List rejected = pathList("a/b"); MultiObjectDeleteException ex = createDeleteException(ACCESS_DENIED, rejected); - List undeleted = removeUndeletedPaths(ex, src, - TestPartialDeleteFailures::qualifyKey); + List undeleted = + removeUndeletedPaths(ex, src, + TestPartialDeleteFailures::qualifyKey); assertEquals("mismatch of rejected and undeleted entries", rejected, undeleted); } @Test public void testSplitKeysFromResults() throws Throwable { - List src = pathList("a", "a/b", "a/c"); - List rejected = pathList("a/b"); - List keys = keysToDelete(src); + List src = pathList("a", "a/b", "a/c"); + List rejected = pathList("a/b"); + List keys = keysToDelete(toPathList(src)); MultiObjectDeleteException ex = createDeleteException(ACCESS_DENIED, rejected); - Pair, List> pair = + Pair, + List> pair = new MultiObjectDeleteSupport(context, null) .splitUndeletedKeys(ex, keys); - List undeleted = pair.getLeft(); - List deleted = pair.getRight(); + List undeleted = pair.getLeft(); + List deleted = pair.getRight(); assertEquals(rejected, undeleted); // now check the deleted list to verify that it is valid src.remove(rejected.get(0)); @@ -121,9 +107,12 @@ public void testSplitKeysFromResults() throws Throwable { * @param paths paths to qualify and then convert to a lst. * @return same paths as a list. */ - private List pathList(String... paths) { + private List pathList(String... paths) { return Arrays.stream(paths) - .map(TestPartialDeleteFailures::qualifyKey) + .map(k-> + new MultiObjectDeleteSupport.KeyPath(k, + qualifyKey(k), + k.endsWith("/"))) .collect(Collectors.toList()); } @@ -135,12 +124,13 @@ private List pathList(String... paths) { */ private MultiObjectDeleteException createDeleteException( final String code, - final List rejected) { + final List rejected) { List errors = rejected.stream() - .map((p) -> { + .map((kp) -> { + Path p = kp.getPath(); MultiObjectDeleteException.DeleteError e = new MultiObjectDeleteException.DeleteError(); - e.setKey(p.toUri().getPath()); + e.setKey(kp.getKey()); e.setCode(code); e.setMessage("forbidden"); return e; @@ -149,14 +139,33 @@ private MultiObjectDeleteException createDeleteException( } /** - * From a list of paths, build up the list of keys for a delete request. + * From a list of paths, build up the list of KeyVersion records + * for a delete request. + * All the entries will be files (i.e. no trailing /) * @param paths path list * @return a key list suitable for a delete request. */ public static List keysToDelete( List paths) { return paths.stream() - .map((p) -> p.toUri().getPath()) + .map(p -> { + String uripath = p.toUri().getPath(); + return uripath.substring(1); + }) + .map(DeleteObjectsRequest.KeyVersion::new) + .collect(Collectors.toList()); + } + + /** + * From a list of keys, build up the list of keys for a delete request. 
+ * If a key has a trailing /, that will be retained, so it will be + * considered a directory during multi-object delete failure handling + * @param keys key list + * @return a key list suitable for a delete request. + */ + public static List toDeleteRequests( + List keys) { + return keys.stream() .map(DeleteObjectsRequest.KeyVersion::new) .collect(Collectors.toList()); } @@ -167,22 +176,33 @@ public static List keysToDelete( */ @Test public void testProcessDeleteFailure() throws Throwable { - Path pathA = qualifyKey("/a"); - Path pathAB = qualifyKey("/a/b"); - Path pathAC = qualifyKey("/a/c"); + String keyA = "/a/"; + String keyAB = "/a/b"; + String keyAC = "/a/c"; + Path pathA = qualifyKey(keyA); + Path pathAB = qualifyKey(keyAB); + Path pathAC = qualifyKey(keyAC); + List srcKeys = Lists.newArrayList(keyA, keyAB, keyAC); List src = Lists.newArrayList(pathA, pathAB, pathAC); - List keyList = keysToDelete(src); + List keyList = toDeleteRequests(srcKeys); List deleteForbidden = Lists.newArrayList(pathAB); final List deleteAllowed = Lists.newArrayList(pathA, pathAC); + List forbiddenKP = + Lists.newArrayList( + new MultiObjectDeleteSupport.KeyPath(keyAB, pathAB, true)); MultiObjectDeleteException ex = createDeleteException(ACCESS_DENIED, - deleteForbidden); + forbiddenKP); OperationTrackingStore store = new OperationTrackingStore(); - StoreContext storeContext = createMockStoreContext(true, store); + StoreContext storeContext = S3ATestUtils + .createMockStoreContext(true, store, CONTEXT_ACCESSORS); MultiObjectDeleteSupport deleteSupport = new MultiObjectDeleteSupport(storeContext, null); + List retainedMarkers = new ArrayList<>(); Triple, List, List>> - triple = deleteSupport.processDeleteFailure(ex, keyList); + triple = deleteSupport.processDeleteFailure(ex, + keyList, + retainedMarkers); Assertions.assertThat(triple.getRight()) .as("failure list") .isEmpty(); @@ -196,39 +216,19 @@ public void testProcessDeleteFailure() throws Throwable { as("undeleted store entries") .containsAll(deleteForbidden) .doesNotContainAnyElementsOf(deleteAllowed); + // because dir marker retention is on, we expect at least one retained + // marker + Assertions.assertThat(retainedMarkers). + as("Retained Markers") + .containsExactly(pathA); + Assertions.assertThat(store.getDeleted()). 
+ as("List of tombstoned records") + .doesNotContain(pathA); } - private StoreContext createMockStoreContext(boolean multiDelete, - OperationTrackingStore store) throws URISyntaxException, IOException { - URI name = new URI("s3a://bucket"); - Configuration conf = new Configuration(); - return new StoreContext( - name, - "bucket", - conf, - "alice", - UserGroupInformation.getCurrentUser(), - BlockingThreadPoolExecutorService.newInstance( - 4, - 4, - 10, TimeUnit.SECONDS, - "s3a-transfer-shared"), - Constants.DEFAULT_EXECUTOR_CAPACITY, - new Invoker(RetryPolicies.TRY_ONCE_THEN_FAIL, Invoker.LOG_EVENT), - new S3AInstrumentation(name), - new S3AStorageStatistics(), - S3AInputPolicy.Normal, - ChangeDetectionPolicy.createPolicy(ChangeDetectionPolicy.Mode.None, - ChangeDetectionPolicy.Source.ETag, false), - multiDelete, - store, - false, - CONTEXT_ACCESSORS, - new S3Guard.TtlTimeProvider(conf)); - } - - private static class MinimalContextAccessor implements ContextAccessors { + private static final class MinimalContextAccessor + implements ContextAccessors { @Override public Path keyToPath(final String key) { @@ -251,154 +251,15 @@ public String getBucketLocation() throws IOException { return null; } - } - /** - * MetadataStore which tracks what is deleted and added. - */ - private static class OperationTrackingStore implements MetadataStore { - - private final List deleted = new ArrayList<>(); - - private final List created = new ArrayList<>(); - - @Override - public void initialize(final FileSystem fs, - ITtlTimeProvider ttlTimeProvider) { - } - - @Override - public void initialize(final Configuration conf, - ITtlTimeProvider ttlTimeProvider) { - } - - @Override - public void forgetMetadata(final Path path) { - } - - @Override - public PathMetadata get(final Path path) { - return null; - } - - @Override - public PathMetadata get(final Path path, - final boolean wantEmptyDirectoryFlag) { - return null; - } - - @Override - public DirListingMetadata listChildren(final Path path) { - return null; - } - - @Override - public void put(final PathMetadata meta) { - put(meta, null); - } - - @Override - public void put(final PathMetadata meta, - final BulkOperationState operationState) { - created.add(meta.getFileStatus().getPath()); - } - - @Override - public void put(final Collection metas, - final BulkOperationState operationState) { - metas.stream().forEach(meta -> put(meta, null)); - } - @Override - public void put(final DirListingMetadata meta, - final List unchangedEntries, - final BulkOperationState operationState) { - created.add(meta.getPath()); + public Path makeQualified(final Path path) { + return path; } @Override - public void destroy() { - } - - @Override - public void delete(final Path path, - final BulkOperationState operationState) { - deleted.add(path); - } - - @Override - public void deletePaths(final Collection paths, - @Nullable final BulkOperationState operationState) throws IOException { - deleted.addAll(paths); - } - - @Override - public void deleteSubtree(final Path path, - final BulkOperationState operationState) { - - } - - @Override - public void move(@Nullable final Collection pathsToDelete, - @Nullable final Collection pathsToCreate, - @Nullable final BulkOperationState operationState) { - } - - @Override - public void prune(final PruneMode pruneMode, final long cutoff) { - } - - @Override - public long prune(final PruneMode pruneMode, - final long cutoff, - final String keyPrefix) { - return 0; - } - - @Override - public BulkOperationState initiateBulkWrite( - final 
BulkOperationState.OperationType operation, - final Path dest) { - return null; - } - - @Override - public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) { - } - - @Override - public Map getDiagnostics() { - return null; - } - - @Override - public void updateParameters(final Map parameters) { - } - - @Override - public void close() { - } - - public List getDeleted() { - return deleted; - } - - public List getCreated() { - return created; - } - - @Override - public RenameTracker initiateRenameOperation( - final StoreContext storeContext, - final Path source, - final S3AFileStatus sourceStatus, - final Path dest) { - throw new UnsupportedOperationException("unsupported"); - } - - @Override - public void addAncestors(final Path qualifiedPath, - @Nullable final BulkOperationState operationState) { - + public ObjectMetadata getObjectMetadata(final String key) + throws IOException { + return new ObjectMetadata(); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AMultipartUploaderSupport.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java similarity index 56% rename from hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AMultipartUploaderSupport.java rename to hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java index 4825d26eeb068..71305aa6633e0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AMultipartUploaderSupport.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java @@ -16,51 +16,60 @@ * limitations under the License. */ -package org.apache.hadoop.fs.s3a; +package org.apache.hadoop.fs.s3a.impl; import java.io.EOFException; import java.io.IOException; import org.junit.Test; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.test.HadoopTestBase; -import static org.apache.hadoop.fs.s3a.S3AMultipartUploader.*; -import static org.apache.hadoop.fs.s3a.S3AMultipartUploader.parsePartHandlePayload; +import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.PartHandlePayload; +import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.buildPartHandlePayload; +import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.parsePartHandlePayload; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** - * Test multipart upload support methods and classes. + * Unit test of multipart upload support methods and classes. 
*/ public class TestS3AMultipartUploaderSupport extends HadoopTestBase { + public static final String PATH = "s3a://bucket/path"; + + public static final String UPLOAD = "01"; + @Test public void testRoundTrip() throws Throwable { - Pair result = roundTrip("tag", 1); - assertEquals("tag", result.getRight()); - assertEquals(1, result.getLeft().longValue()); + PartHandlePayload result = roundTrip(999, "tag", 1); + assertEquals(PATH, result.getPath()); + assertEquals(UPLOAD, result.getUploadId()); + assertEquals(999, result.getPartNumber()); + assertEquals("tag", result.getEtag()); + assertEquals(1, result.getLen()); } @Test public void testRoundTrip2() throws Throwable { long len = 1L + Integer.MAX_VALUE; - Pair result = roundTrip("11223344", - len); - assertEquals("11223344", result.getRight()); - assertEquals(len, result.getLeft().longValue()); + PartHandlePayload result = + roundTrip(1, "11223344", len); + assertEquals(1, result.getPartNumber()); + assertEquals("11223344", result.getEtag()); + assertEquals(len, result.getLen()); } @Test public void testNoEtag() throws Throwable { intercept(IllegalArgumentException.class, - () -> buildPartHandlePayload("", 1)); + () -> buildPartHandlePayload(PATH, UPLOAD, + 0, "", 1)); } @Test public void testNoLen() throws Throwable { intercept(IllegalArgumentException.class, - () -> buildPartHandlePayload("tag", -1)); + () -> buildPartHandlePayload(PATH, UPLOAD, 0, "tag", -1)); } @Test @@ -71,14 +80,17 @@ public void testBadPayload() throws Throwable { @Test public void testBadHeader() throws Throwable { - byte[] bytes = buildPartHandlePayload("tag", 1); - bytes[2]='f'; + byte[] bytes = buildPartHandlePayload(PATH, UPLOAD, 0, "tag", 1); + bytes[2] = 'f'; intercept(IOException.class, "header", () -> parsePartHandlePayload(bytes)); } - private Pair roundTrip(final String tag, final long len) throws IOException { - byte[] bytes = buildPartHandlePayload(tag, len); + private PartHandlePayload roundTrip( + int partNumber, + String tag, + long len) throws IOException { + byte[] bytes = buildPartHandlePayload(PATH, UPLOAD, partNumber, tag, len); return parsePartHandlePayload(bytes); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java new file mode 100644 index 0000000000000..c4f8db71937d0 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java @@ -0,0 +1,683 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.performance; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.EnumSet; +import java.util.Set; +import java.util.concurrent.Callable; + +import org.assertj.core.api.Assertions; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSDataOutputStreamBuilder; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.Tristate; +import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy; +import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; +import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; + +import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_BULK_DELETE_REQUEST; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_DELETE_REQUEST; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.*; +import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.expect; +import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.probe; +import static org.apache.hadoop.test.AssertExtensions.dynamicDescription; + +/** + * Abstract class for tests which make assertions about cost. + *

    + * Factored out from {@code ITestS3AFileOperationCost} + */ +public class AbstractS3ACostTest extends AbstractS3ATestBase { + + /** + * Parameter: should the stores be guarded? + */ + private final boolean s3guard; + + /** + * Parameter: should directory markers be retained? + */ + private final boolean keepMarkers; + + /** + * Is this an auth mode test run? + */ + private final boolean authoritative; + + /** probe states calculated from the configuration options. */ + private boolean isGuarded; + + private boolean isRaw; + + private boolean isAuthoritative; + + private boolean isNonAuth; + + private boolean isKeeping; + + private boolean isDeleting; + + private OperationCostValidator costValidator; + + /** + * Is bulk deletion enabled? + */ + private boolean isBulkDelete; + + /** + * Which statistic measures marker deletion? + * this is the bulk delete statistic by default; + * if that is disabled it becomes the single delete counter. + */ + private Statistic deleteMarkerStatistic; + + public AbstractS3ACostTest( + final boolean s3guard, + final boolean keepMarkers, + final boolean authoritative) { + this.s3guard = s3guard; + this.keepMarkers = keepMarkers; + this.authoritative = authoritative; + } + + @Override + public Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + String bucketName = getTestBucketName(conf); + removeBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL); + if (!isGuarded()) { + // in a raw run remove all s3guard settings + removeBaseAndBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL); + } + removeBaseAndBucketOverrides(bucketName, conf, + DIRECTORY_MARKER_POLICY, + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); + // directory marker options + conf.set(DIRECTORY_MARKER_POLICY, + keepMarkers + ? DIRECTORY_MARKER_POLICY_KEEP + : DIRECTORY_MARKER_POLICY_DELETE); + if (isGuarded()) { + conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_DYNAMO); + conf.setBoolean(METADATASTORE_AUTHORITATIVE, authoritative); + } + disableFilesystemCaching(conf); + return conf; + } + + @Override + public void setup() throws Exception { + super.setup(); + if (isGuarded()) { + // s3guard is required for those test runs where any of the + // guard options are set + assumeS3GuardState(true, getConfiguration()); + } + S3AFileSystem fs = getFileSystem(); + skipDuringFaultInjection(fs); + + // build up the states + isGuarded = isGuarded(); + + isRaw = !isGuarded; + isAuthoritative = isGuarded && authoritative; + isNonAuth = isGuarded && !authoritative; + + isKeeping = isKeepingMarkers(); + + isDeleting = !isKeeping; + + // check that the FS has the expected state + DirectoryPolicy markerPolicy = fs.getDirectoryMarkerPolicy(); + Assertions.assertThat(markerPolicy.getMarkerPolicy()) + .describedAs("Marker policy for filesystem %s", fs) + .isEqualTo(isKeepingMarkers() + ? DirectoryPolicy.MarkerPolicy.Keep + : DirectoryPolicy.MarkerPolicy.Delete); + // All counter statistics of the filesystem are added as metrics. + // Durations too, as they have counters of success and failure. 
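+    // Statistics of any other type are excluded by the filter below.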
+ OperationCostValidator.Builder builder = OperationCostValidator.builder( + getFileSystem()); + EnumSet.allOf(Statistic.class).stream() + .filter(s -> + s.getType() == StatisticTypeEnum.TYPE_COUNTER + || s.getType() == StatisticTypeEnum.TYPE_DURATION) + .forEach(s -> builder.withMetric(s)); + costValidator = builder.build(); + + // determine bulk delete settings + final Configuration fsConf = getFileSystem().getConf(); + isBulkDelete = fsConf.getBoolean(Constants.ENABLE_MULTI_DELETE, + true); + deleteMarkerStatistic = isBulkDelete() + ? OBJECT_BULK_DELETE_REQUEST + : OBJECT_DELETE_REQUEST; + } + + public void assumeUnguarded() { + assume("Unguarded FS only", !isGuarded()); + } + + /** + * Is the store guarded authoritatively on the test path? + * @return true if the condition is met on this test run. + */ + public boolean isAuthoritative() { + return authoritative; + } + + /** + * Is the store guarded? + * @return true if the condition is met on this test run. + */ + public boolean isGuarded() { + return s3guard; + } + + /** + * Is the store raw? + * @return true if the condition is met on this test run. + */ + public boolean isRaw() { + return isRaw; + } + + /** + * Is the store guarded non-authoritatively on the test path? + * @return true if the condition is met on this test run. + */ + public boolean isNonAuth() { + return isNonAuth; + } + + public boolean isDeleting() { + return isDeleting; + } + + public boolean isKeepingMarkers() { + return keepMarkers; + } + + /** + * A special object whose toString() value is the current + * state of the metrics. + */ + protected Object getMetricSummary() { + return costValidator; + } + + /** + * Create then close the file through the builder API. + * @param path path + * @param overwrite overwrite flag + * @param recursive true == skip parent existence check + * @param cost expected cost + * @return path to new object. + */ + protected Path buildFile(Path path, + boolean overwrite, + boolean recursive, + OperationCost cost) throws Exception { + resetStatistics(); + verifyRaw(cost, () -> { + FSDataOutputStreamBuilder builder = getFileSystem().createFile(path) + .overwrite(overwrite); + if (recursive) { + builder.recursive(); + } + FSDataOutputStream stream = builder.build(); + stream.close(); + return stream.toString(); + }); + return path; + } + + /** + * Create a directory, returning its path. + * @param p path to dir. + * @return path of new dir + */ + protected Path dir(Path p) throws IOException { + mkdirs(p); + return p; + } + + /** + * Create a file, returning its path. + * @param p path to file. + * @return path of new file + */ + protected Path file(Path p) throws IOException { + return file(p, true); + } + + /** + * Create a file, returning its path. + * @param path path to file. + * @param overwrite overwrite flag + * @return path of new file + */ + protected Path file(Path path, final boolean overwrite) + throws IOException { + getFileSystem().create(path, overwrite).close(); + return path; + } + + /** + * Touch a file, overwriting. + * @param path path + * @return path to new object. + */ + protected Path create(Path path) throws Exception { + return create(path, true, CREATE_FILE_OVERWRITE); + } + + /** + * Create then close the file. + * @param path path + * @param overwrite overwrite flag + * @param cost expected cost + + * @return path to new object. 
+ */ + protected Path create(Path path, boolean overwrite, + OperationCost cost) throws Exception { + return verifyRaw(cost, () -> + file(path, overwrite)); + } + + /** + * Execute rename, returning the current metrics. + * For use in l-expressions. + * @param source source path. + * @param dest dest path + * @return a string for exceptions. + */ + public String execRename(final Path source, + final Path dest) throws IOException { + getFileSystem().rename(source, dest); + return String.format("rename(%s, %s): %s", + dest, source, getMetricSummary()); + } + + /** + * How many directories are in a path? + * @param path path to probe. + * @return the number of entries below root this path is + */ + protected int directoriesInPath(Path path) { + return path.isRoot() ? 0 : 1 + directoriesInPath(path.getParent()); + } + + /** + * Reset all the metrics being tracked. + */ + private void resetStatistics() { + costValidator.resetMetricDiffs(); + } + + /** + * Execute a closure and verify the metrics. + * @param eval closure to evaluate + * @param expected varargs list of expected diffs + * @param return type. + * @return the result of the evaluation + */ + protected T verifyMetrics( + Callable eval, + OperationCostValidator.ExpectedProbe... expected) throws Exception { + return costValidator.exec(eval, expected); + + } + + /** + * Execute a closure, expecting an exception. + * Verify the metrics after the exception has been caught and + * validated. + * @param clazz type of exception + * @param text text to look for in exception (optional) + * @param eval closure to evaluate + * @param expected varargs list of expected diffs + * @param return type of closure + * @param exception type + * @return the exception caught. + * @throws Exception any other exception + */ + protected E verifyMetricsIntercepting( + Class clazz, + String text, + Callable eval, + OperationCostValidator.ExpectedProbe... expected) throws Exception { + return costValidator.intercepting(clazz, text, eval, expected); + } + + /** + * Execute a closure expecting an exception. + * @param clazz type of exception + * @param text text to look for in exception (optional) + * @param cost expected cost declaration. + * @param eval closure to evaluate + * @param return type of closure + * @param exception type + * @return the exception caught. + * @throws Exception any other exception + */ + protected E interceptRaw( + Class clazz, + String text, + OperationCost cost, + Callable eval) throws Exception { + return verifyMetricsIntercepting(clazz, text, eval, whenRaw(cost)); + } + + /** + * Declare the expected cost on any FS. + * @param cost costs to expect + * @return a probe. + */ + protected OperationCostValidator.ExpectedProbe always( + OperationCost cost) { + return expect(true, cost); + } + + /** + * Declare the expected cost on a raw FS. + * @param cost costs to expect + * @return a probe. + */ + protected OperationCostValidator.ExpectedProbe whenRaw( + OperationCost cost) { + return expect(isRaw(), cost); + } + + /** + * Declare the expected cost on a guarded FS. + * @param cost costs to expect + * @return a probe. + */ + protected OperationCostValidator.ExpectedProbe whenGuarded( + OperationCost cost) { + return expect(isGuarded(), cost); + } + + /** + * Declare the expected cost on a guarded auth FS. + * @param cost costs to expect + * @return a probe. 
+ */
+  protected OperationCostValidator.ExpectedProbe whenAuthoritative(
+      OperationCost cost) {
+    return expect(isAuthoritative(), cost);
+  }
+
+
+  /**
+   * Declare the expected cost on a guarded nonauth FS.
+   * @param cost costs to expect
+   * @return a probe.
+   */
+  protected OperationCostValidator.ExpectedProbe whenNonauth(
+      OperationCost cost) {
+    return expect(isNonAuth(), cost);
+  }
+
+
+  /**
+   * A metric diff which must hold when the fs is keeping markers.
+   * @param cost expected cost
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe whenKeeping(
+      OperationCost cost) {
+    return expect(isKeepingMarkers(), cost);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is deleting markers.
+   * @param cost expected cost
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe whenDeleting(
+      OperationCost cost) {
+    return expect(isDeleting(), cost);
+  }
+
+  /**
+   * Execute a closure expecting a specific number of HEAD/LIST calls
+   * on raw S3 stores only. The operation is always evaluated.
+   * @param cost expected cost
+   * @param eval closure to evaluate
+   * @param return type of closure
+   * @return the result of the evaluation
+   */
+  protected T verifyRaw(
+      OperationCost cost,
+      Callable eval) throws Exception {
+    return verifyMetrics(eval,
+        whenRaw(cost), OperationCostValidator.always());
+  }
+
+  /**
+   * Execute {@code S3AFileSystem#innerGetFileStatus(Path, boolean, Set)}
+   * for the given probes.
+   * Expect the specific HEAD/LIST count with a raw FS.
+   * @param path path
+   * @param needEmptyDirectoryFlag look for empty directory
+   * @param probes file status probes to perform
+   * @param cost expected cost
+   * @return the status
+   */
+  public S3AFileStatus verifyRawInnerGetFileStatus(
+      Path path,
+      boolean needEmptyDirectoryFlag,
+      Set probes,
+      OperationCost cost) throws Exception {
+    return verifyRaw(cost, () ->
+        innerGetFileStatus(getFileSystem(),
+            path,
+            needEmptyDirectoryFlag,
+            probes));
+  }
+
+  /**
+   * Execute {@code S3AFileSystem#innerGetFileStatus(Path, boolean, Set)}
+   * for the given probes - expect a FileNotFoundException,
+   * and the specific HEAD/LIST count with a raw FS.
+   * @param path path
+   * @param needEmptyDirectoryFlag look for empty directory
+   * @param probes file status probes to perform
+   * @param cost expected cost
+   */
+
+  public void interceptRawGetFileStatusFNFE(
+      Path path,
+      boolean needEmptyDirectoryFlag,
+      Set probes,
+      OperationCost cost) throws Exception {
+    interceptRaw(FileNotFoundException.class, "",
+        cost, () ->
+            innerGetFileStatus(getFileSystem(),
+                path,
+                needEmptyDirectoryFlag,
+                probes));
+  }
+
+  /**
+   * Probe for a path being a directory.
+   * Metrics are only checked on unguarded stores.
+   * @param path path
+   * @param expected expected outcome
+   * @param cost expected cost on a Raw FS.
+   */
+  protected void isDir(Path path,
+      boolean expected,
+      OperationCost cost) throws Exception {
+    boolean b = verifyRaw(cost, () ->
+        getFileSystem().isDirectory(path));
+    Assertions.assertThat(b)
+        .describedAs("isDirectory(%s)", path)
+        .isEqualTo(expected);
+  }
+
+  /**
+   * Probe for a path being a file.
+   * Metrics are only checked on unguarded stores.
+   * @param path path
+   * @param expected expected outcome
+   * @param cost expected cost on a Raw FS.
+ */
+  protected void isFile(Path path,
+      boolean expected,
+      OperationCost cost) throws Exception {
+    boolean b = verifyRaw(cost, () ->
+        getFileSystem().isFile(path));
+    Assertions.assertThat(b)
+        .describedAs("isFile(%s)", path)
+        .isEqualTo(expected);
+  }
+
+  /**
+   * A metric diff which must always hold.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe with(
+      final Statistic stat, final int expected) {
+    return probe(stat, expected);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is unguarded.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe withWhenRaw(
+      final Statistic stat, final int expected) {
+    return probe(isRaw(), stat, expected);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is guarded.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe withWhenGuarded(
+      final Statistic stat,
+      final int expected) {
+    return probe(isGuarded(), stat, expected);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is guarded + authoritative.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe withWhenAuthoritative(
+      final Statistic stat,
+      final int expected) {
+    return probe(isAuthoritative(), stat, expected);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is guarded + non-authoritative.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe withWhenNonauth(
+      final Statistic stat,
+      final int expected) {
+    return probe(isNonAuth(), stat, expected);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is keeping markers.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe withWhenKeeping(
+      final Statistic stat,
+      final int expected) {
+    return probe(isKeepingMarkers(), stat, expected);
+  }
+
+  /**
+   * A metric diff which must hold when the fs is deleting markers.
+   * @param stat metric source
+   * @param expected expected value.
+   * @return the diff.
+   */
+  protected OperationCostValidator.ExpectedProbe withWhenDeleting(
+      final Statistic stat,
+      final int expected) {
+    return probe(isDeleting(), stat, expected);
+  }
+
+  /**
+   * Assert the empty directory status of a file is as expected.
+   * The raised assertion message includes a listing of the path.
+   * @param status status to probe.
+   * @param expected expected value
+   */
+  protected void assertEmptyDirStatus(final S3AFileStatus status,
+      final Tristate expected) {
+    Assertions.assertThat(status.isEmptyDirectory())
+        .describedAs(dynamicDescription(() ->
+            "FileStatus says directory is not empty: " + status
+            + "\n" + ContractTestUtils.ls(
+                getFileSystem(), status.getPath())))
+        .isEqualTo(expected);
+  }
+
+  /**
+   * Is bulk deletion enabled?
+   */
+  protected boolean isBulkDelete() {
+    return isBulkDelete;
+  }
+
+  /**
+   * Which statistic measures marker deletion?
+   * This is the bulk delete statistic by default;
+   * if that is disabled it becomes the single delete counter.
+ */ + protected Statistic getDeleteMarkerStatistic() { + return deleteMarkerStatistic; + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java new file mode 100644 index 0000000000000..ed56802ddfec1 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java @@ -0,0 +1,824 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.performance; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.stream.Collectors; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.ObjectMetadata; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3AUtils; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP; +import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; +import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * This is a test suite designed to verify that directory markers do + * not get misconstrued as empty directories during operations + * which explicitly or implicitly list directory trees. + *

    + * It is also intended to be backported to all releases + * which are enhanced to read directory trees where markers have + * been retained. + * Hence: it does not use any of the new helper classes to + * measure the cost of operations or attempt to create markers + * through the FS APIs. + *

    + * Instead, the directory structure to test is created through + * low-level S3 SDK API calls. + * We also skip any probes to measure/assert metrics. + * We're testing the semantics here, not the cost of the operations. + * Doing that makes it a lot easier to backport. + * + *
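    A rough sketch of what that low-level setup looks like, using the same SDK client the suite obtains in setup(); the bucket and key names here are illustrative assumptions rather than values taken from the test:

        // client used by the suite itself (see setup()); names are examples
        AmazonS3 s3 = getFileSystem().getAmazonS3ClientForTesting("markers");
        // a directory marker is simply an object whose key ends in "/"
        s3.putObject("example-bucket", "base/marker/", "");
        // a file "under" the marker is an ordinary object sharing that prefix
        s3.putObject("example-bucket", "base/marker/fileUnderMarker", "hello");
        // a HEAD of the marker key confirms it exists as a real object
        s3.getObjectMetadata("example-bucket", "base/marker/");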

    + * Similarly: JUnit assertions over AssertJ. + *

    + * The tests work with unguarded buckets only -the bucket settings are changed + * appropriately. + */ +@RunWith(Parameterized.class) +public class ITestDirectoryMarkerListing extends AbstractS3ATestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestDirectoryMarkerListing.class); + + private static final String FILENAME = "fileUnderMarker"; + + private static final String HELLO = "hello"; + + private static final String MARKER = "marker"; + + private static final String MARKER_PEER = "markerpeer"; + + /** + * Parameterization. + */ + @Parameterized.Parameters(name = "{0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {"keep-markers", true}, + {"delete-markers", false}, + }); + } + + /** + * Does rename copy markers? + * Value: {@value} + *

    + * Older releases: yes. + *

    + * The full marker-optimized releases: no. + */ + private static final boolean RENAME_COPIES_MARKERS = false; + + /** + * Test configuration name. + */ + private final String name; + + /** + * Does this test configuration keep markers? + */ + private final boolean keepMarkers; + + /** + * Is this FS deleting markers? + */ + private final boolean isDeletingMarkers; + + /** + * Path to a directory which has a marker. + */ + private Path markerDir; + + /** + * Key to the object representing {@link #markerDir}. + */ + private String markerKey; + + /** + * Key to the object representing {@link #markerDir} with + * a trailing / added. This references the actual object + * which has been created. + */ + private String markerKeySlash; + + /** + * bucket of tests. + */ + private String bucket; + + /** + * S3 Client of the FS. + */ + private AmazonS3 s3client; + + /** + * Path to a file under the marker. + */ + private Path filePathUnderMarker; + + /** + * Key to a file under the marker. + */ + private String fileKeyUnderMarker; + + /** + * base path for the test files; the marker dir goes under this. + */ + private Path basePath; + + /** + * Path to a file a peer of markerDir. + */ + private Path markerPeer; + + /** + * Key to a file a peer of markerDir. + */ + private String markerPeerKey; + + public ITestDirectoryMarkerListing(final String name, + final boolean keepMarkers) { + this.name = name; + this.keepMarkers = keepMarkers; + this.isDeletingMarkers = !keepMarkers; + } + + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + String bucketName = getTestBucketName(conf); + + // Turn off S3Guard + removeBaseAndBucketOverrides(bucketName, conf, + S3_METADATA_STORE_IMPL, + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); + + // directory marker options + removeBaseAndBucketOverrides(bucketName, conf, + DIRECTORY_MARKER_POLICY); + conf.set(DIRECTORY_MARKER_POLICY, + keepMarkers + ? DIRECTORY_MARKER_POLICY_KEEP + : DIRECTORY_MARKER_POLICY_DELETE); + return conf; + } + + /** + * The setup phase includes creating the test objects. + */ + @Override + public void setup() throws Exception { + super.setup(); + S3AFileSystem fs = getFileSystem(); + assume("unguarded FS only", + !fs.hasMetadataStore()); + s3client = fs.getAmazonS3ClientForTesting("markers"); + + bucket = fs.getBucket(); + Path base = new Path(methodPath(), "base"); + + createTestObjects(base); + } + + /** + * Teardown deletes the objects created before + * the superclass does the directory cleanup. + */ + @Override + public void teardown() throws Exception { + if (s3client != null) { + deleteObject(markerKey); + deleteObject(markerKeySlash); + deleteObject(markerPeerKey); + deleteObject(fileKeyUnderMarker); + } + // do this ourselves to avoid audits teardown failing + // when surplus markers are found + deleteTestDirInTeardown(); + super.teardown(); + } + + /** + * Create the test objects under the given path, setting + * various fields in the process. + * @param path parent path of everything + */ + private void createTestObjects(final Path path) throws Exception { + S3AFileSystem fs = getFileSystem(); + basePath = path; + markerDir = new Path(basePath, MARKER); + // peer path has the same initial name to make sure there + // is no confusion there. 
+ markerPeer = new Path(basePath, MARKER_PEER); + markerPeerKey = fs.pathToKey(markerPeer); + markerKey = fs.pathToKey(markerDir); + markerKeySlash = markerKey + "/"; + fileKeyUnderMarker = markerKeySlash + FILENAME; + filePathUnderMarker = new Path(markerDir, FILENAME); + // put the empty dir + fs.mkdirs(markerDir); + touch(fs, markerPeer); + put(fileKeyUnderMarker, HELLO); + } + + /* + ================================================================= + Basic probes + ================================================================= + */ + + @Test + public void testMarkerExists() throws Throwable { + describe("Verify the marker exists"); + head(markerKeySlash); + assertIsDirectory(markerDir); + } + + @Test + public void testObjectUnderMarker() throws Throwable { + describe("verify the file under the marker dir exists"); + assertIsFile(filePathUnderMarker); + head(fileKeyUnderMarker); + } + + /* + ================================================================= + The listing operations + ================================================================= + */ + + @Test + public void testListStatusMarkerDir() throws Throwable { + describe("list the marker directory and expect to see the file"); + assertContainsFileUnderMarkerOnly( + toList(getFileSystem().listStatus(markerDir))); + } + + + @Test + public void testListFilesMarkerDirFlat() throws Throwable { + assertContainsFileUnderMarkerOnly(toList( + getFileSystem().listFiles(markerDir, false))); + } + + @Test + public void testListFilesMarkerDirRecursive() throws Throwable { + List statuses = toList( + getFileSystem().listFiles(markerDir, true)); + assertContainsFileUnderMarkerOnly(statuses); + } + + /** + * Path listing above the base dir MUST only find the file + * and not the marker. + */ + @Test + public void testListStatusBaseDirRecursive() throws Throwable { + List statuses = toList( + getFileSystem().listFiles(basePath, true)); + assertContainsExactlyStatusOfPaths(statuses, filePathUnderMarker, + markerPeer); + } + + @Test + public void testGlobStatusBaseDirRecursive() throws Throwable { + Path escapedPath = new Path(escape(basePath.toUri().getPath())); + List statuses = + exec("glob", () -> + toList(getFileSystem().globStatus(new Path(escapedPath, "*")))); + assertContainsExactlyStatusOfPaths(statuses, markerDir, markerPeer); + assertIsFileAtPath(markerPeer, statuses.get(1)); + } + + @Test + public void testGlobStatusMarkerDir() throws Throwable { + Path escapedPath = new Path(escape(markerDir.toUri().getPath())); + List statuses = + exec("glob", () -> + toList(getFileSystem().globStatus(new Path(escapedPath, "*")))); + assertContainsFileUnderMarkerOnly(statuses); + } + + /** + * Call {@code listLocatedStatus(basePath)} + *

    + * The list here returns the marker peer before the + * dir. Reason: the listing iterators return + * the objects before the common prefixes, and the + * marker dir is coming back as a prefix. + */ + @Test + public void testListLocatedStatusBaseDir() throws Throwable { + List statuses = + exec("listLocatedStatus", () -> + toList(getFileSystem().listLocatedStatus(basePath))); + + assertContainsExactlyStatusOfPaths(statuses, markerPeer, markerDir); + } + + /** + * Call {@code listLocatedStatus(markerDir)}; expect + * the file entry only. + */ + @Test + public void testListLocatedStatusMarkerDir() throws Throwable { + List statuses = + exec("listLocatedStatus", () -> + toList(getFileSystem().listLocatedStatus(markerDir))); + + assertContainsFileUnderMarkerOnly(statuses); + } + + + /* + ================================================================= + Creation Rejection + ================================================================= + */ + + @Test + public void testCreateNoOverwriteMarkerDir() throws Throwable { + describe("create no-overwrite over the marker dir fails"); + head(markerKeySlash); + intercept(FileAlreadyExistsException.class, () -> + exec("create", () -> + getFileSystem().create(markerDir, false))); + // dir is still there. + head(markerKeySlash); + } + + @Test + public void testCreateNoOverwriteFile() throws Throwable { + describe("create-no-overwrite on the file fails"); + + head(fileKeyUnderMarker); + intercept(FileAlreadyExistsException.class, () -> + exec("create", () -> + getFileSystem().create(filePathUnderMarker, false))); + assertTestObjectsExist(); + } + + @Test + public void testCreateFileNoOverwrite() throws Throwable { + describe("verify the createFile() API also fails"); + head(fileKeyUnderMarker); + intercept(FileAlreadyExistsException.class, () -> + exec("create", () -> + getFileSystem().createFile(filePathUnderMarker) + .overwrite(false) + .build())); + assertTestObjectsExist(); + } + + /* + ================================================================= + Delete. + ================================================================= + */ + + @Test + public void testDelete() throws Throwable { + S3AFileSystem fs = getFileSystem(); + // a non recursive delete MUST fail because + // it is not empty + intercept(PathIsNotEmptyDirectoryException.class, () -> + fs.delete(markerDir, false)); + // file is still there + head(fileKeyUnderMarker); + + // recursive delete MUST succeed + fs.delete(markerDir, true); + // and the markers are gone + head404(fileKeyUnderMarker); + head404(markerKeySlash); + // just for completeness + fs.delete(basePath, true); + } + + /* + ================================================================= + Rename. + ================================================================= + */ + + /** + * Rename the base directory, expect the source files to move. + *

    + * Whether or not the marker itself is copied depends on whether + * the release's rename operation explicitly skips + * markers on renames. + */ + @Test + public void testRenameBase() throws Throwable { + describe("rename base directory"); + + Path src = basePath; + Path dest = new Path(methodPath(), "dest"); + assertRenamed(src, dest); + + assertPathDoesNotExist("source", src); + assertPathDoesNotExist("source", filePathUnderMarker); + assertPathExists("dest not found", dest); + + // all the paths dest relative + Path destMarkerDir = new Path(dest, MARKER); + // peer path has the same initial name to make sure there + // is no confusion there. + Path destMarkerPeer = new Path(dest, MARKER_PEER); + String destMarkerKey = toKey(destMarkerDir); + String destMarkerKeySlash = destMarkerKey + "/"; + String destFileKeyUnderMarker = destMarkerKeySlash + FILENAME; + Path destFilePathUnderMarker = new Path(destMarkerDir, FILENAME); + assertIsFile(destFilePathUnderMarker); + assertIsFile(destMarkerPeer); + head(destFileKeyUnderMarker); + + // probe for the marker based on expected rename + // behavior + if (RENAME_COPIES_MARKERS) { + head(destMarkerKeySlash); + } else { + head404(destMarkerKeySlash); + } + + } + + /** + * Rename a file under a marker by passing in the marker + * directory as the destination; the final path is derived + * from the original filename. + *

    + * After the rename: + *
    + * <ol>
    + *   <li>The data must be at the derived destination path.</li>
    + *   <li>The source file must not exist.</li>
    + *   <li>The parent dir of the source file must exist.</li>
    + *   <li>The marker above the destination file must not exist.</li>
    + * </ol>
    + */ + @Test + public void testRenameUnderMarkerDir() throws Throwable { + describe("directory rename under an existing marker"); + String file = "sourceFile"; + Path srcDir = new Path(basePath, "srcdir"); + mkdirs(srcDir); + Path src = new Path(srcDir, file); + String srcKey = toKey(src); + put(srcKey, file); + head(srcKey); + + // set the destination to be the marker directory. + Path dest = markerDir; + // rename the source file under the dest dir. + assertRenamed(src, dest); + assertIsFile(new Path(dest, file)); + assertIsDirectory(srcDir); + if (isDeletingMarkers) { + head404(markerKeySlash); + } else { + head(markerKeySlash); + } + } + + /** + * Rename file under a marker, giving the full path to the destination + * file. + *

    + * After the rename: + *
    + * <ol>
    + *   <li>The data must be at the explicit destination path.</li>
    + *   <li>The source file must not exist.</li>
    + *   <li>The parent dir of the source file must exist.</li>
    + *   <li>The marker above the destination file must not exist.</li>
    + * </ol>
    + */ + @Test + public void testRenameUnderMarkerWithPath() throws Throwable { + describe("directory rename under an existing marker"); + S3AFileSystem fs = getFileSystem(); + String file = "sourceFile"; + Path srcDir = new Path(basePath, "srcdir"); + mkdirs(srcDir); + Path src = new Path(srcDir, file); + String srcKey = toKey(src); + put(srcKey, file); + head(srcKey); + + // set the destination to be the final file + Path dest = new Path(markerDir, "destFile"); + // rename the source file to the destination file + assertRenamed(src, dest); + assertIsFile(dest); + assertIsDirectory(srcDir); + if (isDeletingMarkers) { + head404(markerKeySlash); + } else { + head(markerKeySlash); + } + } + + /** + * This test creates an empty dir and renames it over the directory marker. + * If the dest was considered to be empty, the rename would fail. + */ + @Test + public void testRenameEmptyDirOverMarker() throws Throwable { + describe("rename an empty directory over the marker"); + S3AFileSystem fs = getFileSystem(); + String dir = "sourceDir"; + Path src = new Path(basePath, dir); + fs.mkdirs(src); + assertIsDirectory(src); + String srcKey = toKey(src) + "/"; + head(srcKey); + Path dest = markerDir; + // renamed into the dest dir + assertFalse("rename(" + src + ", " + dest + ") should have failed", + getFileSystem().rename(src, dest)); + // source is still there + assertIsDirectory(src); + head(srcKey); + // and a non-recursive delete lets us verify it is considered + // an empty dir + assertDeleted(src, false); + assertTestObjectsExist(); + } + + /* + ================================================================= + Utility methods and assertions. + ================================================================= + */ + + /** + * Assert the test objects exist. + */ + private void assertTestObjectsExist() throws Exception { + head(fileKeyUnderMarker); + head(markerKeySlash); + } + + /** + * Put a string to a path. + * @param key key + * @param content string + */ + private void put(final String key, final String content) throws Exception { + exec("PUT " + key, () -> + s3client.putObject(bucket, key, content)); + } + /** + * Delete an object. + * @param key key + * @param content string + */ + private void deleteObject(final String key) throws Exception { + exec("DELETE " + key, () -> { + s3client.deleteObject(bucket, key); + return "deleted " + key; + }); + } + + /** + * Issue a HEAD request. + * @param key + * @return a description of the object. + */ + private String head(final String key) throws Exception { + ObjectMetadata md = exec("HEAD " + key, () -> + s3client.getObjectMetadata(bucket, key)); + return String.format("Object %s of length %d", + key, md.getInstanceLength()); + } + + /** + * Issue a HEAD request and expect a 404 back. + * @param key + * @return the metadata + */ + private void head404(final String key) throws Exception { + intercept(FileNotFoundException.class, "", + "Expected 404 of " + key, () -> + head(key)); + } + + /** + * Execute an operation; transate AWS exceptions. + * @param op operation + * @param call call to make + * @param returned type + * @return result of the call. 
+ * @throws Exception failure + */ + private T exec(String op, Callable call) throws Exception { + ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); + try { + return call.call(); + } catch (AmazonClientException ex) { + throw S3AUtils.translateException(op, "", ex); + } finally { + timer.end(op); + } + } + + /** + * Assert that the listing contains only the status + * of the file under the marker. + * @param statuses status objects + */ + private void assertContainsFileUnderMarkerOnly( + final List statuses) { + + assertContainsExactlyStatusOfPaths(statuses, filePathUnderMarker); + assertIsFileUnderMarker(statuses.get(0)); + } + + /** + * Expect the list of status objects to match that of the paths. + * @param statuses status object list + * @param paths ordered varargs list of paths + * @param type of status objects + */ + private void assertContainsExactlyStatusOfPaths( + List statuses, Path... paths) { + + String actual = statuses.stream() + .map(Object::toString) + .collect(Collectors.joining(";")); + String expected = Arrays.stream(paths) + .map(Object::toString) + .collect(Collectors.joining(";")); + String summary = "expected [" + expected + "]" + + " actual = [" + actual + "]"; + assertEquals("mismatch in size of listing " + summary, + paths.length, statuses.size()); + for (int i = 0; i < statuses.size(); i++) { + assertEquals("Path mismatch at element " + i + " in " + summary, + paths[i], statuses.get(i).getPath()); + } + } + + /** + * Assert the status object refers to the file created + * under the marker. + * @param stat status object + */ + private void assertIsFileUnderMarker(final FileStatus stat) { + assertIsFileAtPath(filePathUnderMarker, stat); + } + + /** + * Assert the status object refers to a path at the given name. + * @param path path + * @param stat status object + */ + private void assertIsFileAtPath(final Path path, final FileStatus stat) { + assertTrue("Is not file " + stat, stat.isFile()); + assertPathEquals(path, stat); + } + + /** + * Assert a status object's path matches expected. + * @param path path to expect + * @param stat status object + */ + private void assertPathEquals(final Path path, final FileStatus stat) { + assertEquals("filename is not the expected path :" + stat, + path, stat.getPath()); + } + + /** + * Given a remote iterator of status objects, + * build a list of the values. + * @param status status list + * @param actual type. + * @return source. + * @throws IOException + */ + private List toList( + RemoteIterator status) throws IOException { + + List l = new ArrayList<>(); + while (status.hasNext()) { + l.add(status.next()); + } + return dump(l); + } + + /** + * Given an array of status objects, + * build a list of the values. + * @param status status list + * @param actual type. + * @return source. + * @throws IOException + */ + private List toList( + T[] status) throws IOException { + return dump(Arrays.asList(status)); + } + + /** + * Dump the string values of a list to the log; return + * the list. + * @param l source. + * @param source type + * @return the list + */ + private List dump(List l) { + int c = 1; + for (T t : l) { + LOG.info("{}\t{}", c++, t); + } + return l; + } + + /** + * Rename: assert the outcome is true. 
+ * @param src source path + * @param dest dest path + */ + private void assertRenamed(final Path src, final Path dest) + throws IOException { + assertTrue("rename(" + src + ", " + dest + ") failed", + getFileSystem().rename(src, dest)); + } + + /** + * Convert a path to a key; does not add any trailing / . + * @param path path in + * @return key out + */ + private String toKey(final Path path) { + return getFileSystem().pathToKey(path); + } + + /** + * Escape paths before handing to globStatus; this is needed as + * parameterized runs produce paths with [] in them. + * @param pathstr source path string + * @return an escaped path string + */ + private String escape(String pathstr) { + StringBuilder r = new StringBuilder(); + for (char c : pathstr.toCharArray()) { + String ch = Character.toString(c); + if ("?*[{".contains(ch)) { + r.append("\\"); + } + r.append(ch); + } + return r.toString(); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java new file mode 100644 index 0000000000000..2901767128fce --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.performance; + + +import java.io.FileNotFoundException; +import java.util.Arrays; +import java.util.Collection; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.Tristate; +import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; + +import static org.apache.hadoop.fs.s3a.Statistic.*; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.*; +import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.probe; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Use metrics to assert about the cost of file API calls. + *

    + * Parameterized on guarded vs raw. and directory marker keep vs delete. + * There's extra complexity related to bulk/non-bulk delete calls. + * If bulk deletes are disabled, many more requests are made to delete + * parent directories. The counters of objects deleted are constant + * irrespective of the delete mode. + */ +@RunWith(Parameterized.class) +public class ITestS3ADeleteCost extends AbstractS3ACostTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestS3ADeleteCost.class); + + /** + * Parameterization. + */ + @Parameterized.Parameters(name = "{0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {"raw-keep-markers", false, true, false}, + {"raw-delete-markers", false, false, false}, + {"nonauth-keep-markers", true, true, false}, + {"nonauth-delete-markers", true, false, false}, + {"auth-delete-markers", true, false, true}, + {"auth-keep-markers", true, true, true} + }); + } + + public ITestS3ADeleteCost(final String name, + final boolean s3guard, + final boolean keepMarkers, + final boolean authoritative) { + super(s3guard, keepMarkers, authoritative); + } + + @Override + public void teardown() throws Exception { + if (isKeepingMarkers()) { + // do this ourselves to avoid audits teardown failing + // when surplus markers are found + deleteTestDirInTeardown(); + } + super.teardown(); + } + + /** + * This creates a directory with a child and then deletes it. + * The parent dir must be found and declared as empty. + *

    + * When deleting markers, that forces the recreation of a new marker.

    + */ + @Test + public void testDeleteSingleFileInDir() throws Throwable { + describe("delete a file"); + S3AFileSystem fs = getFileSystem(); + // creates the marker + Path dir = dir(methodPath()); + // file creation may have deleted that marker, but it may + // still be there + Path simpleFile = file(new Path(dir, "simple.txt")); + + boolean rawAndKeeping = isRaw() && isDeleting(); + boolean rawAndDeleting = isRaw() && isDeleting(); + verifyMetrics(() -> { + fs.delete(simpleFile, false); + return "after fs.delete(simpleFile) " + getMetricSummary(); + }, + probe(rawAndKeeping, OBJECT_METADATA_REQUESTS, + FILESTATUS_FILE_PROBE_H), + // if deleting markers, look for the parent too + probe(rawAndDeleting, OBJECT_METADATA_REQUESTS, + FILESTATUS_FILE_PROBE_H + FILESTATUS_DIR_PROBE_H), + withWhenRaw(OBJECT_LIST_REQUEST, + FILESTATUS_FILE_PROBE_L + FILESTATUS_DIR_PROBE_L), + with(DIRECTORIES_DELETED, 0), + with(FILES_DELETED, 1), + + // a single DELETE call is made to delete the object + with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST), + + // keeping: create no parent dirs or delete parents + withWhenKeeping(DIRECTORIES_CREATED, 0), + withWhenKeeping(OBJECT_BULK_DELETE_REQUEST, 0), + + // deleting: create a parent and delete any of its parents + withWhenDeleting(DIRECTORIES_CREATED, 1), + // a bulk delete for all parents is issued. + // the number of objects in it depends on the depth of the tree; + // don't worry about that + withWhenDeleting(OBJECT_BULK_DELETE_REQUEST, DELETE_MARKER_REQUEST) + ); + + // there is an empty dir for a parent + S3AFileStatus status = verifyRawInnerGetFileStatus(dir, true, + StatusProbeEnum.ALL, GET_FILE_STATUS_ON_DIR); + assertEmptyDirStatus(status, Tristate.TRUE); + } + + /** + * This creates a directory with a two files and then deletes one of the + * files. + */ + @Test + public void testDeleteFileInDir() throws Throwable { + describe("delete a file in a directory with multiple files"); + S3AFileSystem fs = getFileSystem(); + // creates the marker + Path dir = dir(methodPath()); + // file creation may have deleted that marker, but it may + // still be there + Path file1 = file(new Path(dir, "file1.txt")); + Path file2 = file(new Path(dir, "file2.txt")); + + boolean rawAndKeeping = isRaw() && isDeleting(); + boolean rawAndDeleting = isRaw() && isDeleting(); + verifyMetrics(() -> { + fs.delete(file1, false); + return "after fs.delete(file1) " + getMetricSummary(); + }, + // delete file. 
For keeping: that's it + probe(rawAndKeeping, OBJECT_METADATA_REQUESTS, + FILESTATUS_FILE_PROBE_H), + // if deleting markers, look for the parent too + probe(rawAndDeleting, OBJECT_METADATA_REQUESTS, + FILESTATUS_FILE_PROBE_H + FILESTATUS_DIR_PROBE_H), + withWhenRaw(OBJECT_LIST_REQUEST, + FILESTATUS_FILE_PROBE_L + FILESTATUS_DIR_PROBE_L), + with(DIRECTORIES_DELETED, 0), + with(FILES_DELETED, 1), + + // no need to create a parent + with(DIRECTORIES_CREATED, 0), + + // keeping: create no parent dirs or delete parents + withWhenKeeping(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST), + + // deleting: create a parent and delete any of its parents + withWhenDeleting(OBJECT_DELETE_REQUEST, + DELETE_OBJECT_REQUEST)); + } + + @Test + public void testDirMarkersSubdir() throws Throwable { + describe("verify cost of deep subdir creation"); + + Path methodPath = methodPath(); + Path parent = new Path(methodPath, "parent"); + Path subDir = new Path(parent, "1/2/3/4/5/6"); + S3AFileSystem fs = getFileSystem(); + // this creates a peer of the parent dir, so ensures + // that when parent dir is deleted, no markers need to + // be recreated...that complicates all the metrics which + // are measured + Path sibling = new Path(methodPath, "sibling"); + ContractTestUtils.touch(fs, sibling); + + int dirsCreated = 2; + fs.delete(parent, true); + + LOG.info("creating parent dir {}", parent); + fs.mkdirs(parent); + + LOG.info("creating sub directory {}", subDir); + // one dir created, possibly a parent removed + final int fakeDirectoriesToDelete = directoriesInPath(subDir) - 1; + verifyMetrics(() -> { + mkdirs(subDir); + return "after mkdir(subDir) " + getMetricSummary(); + }, + with(DIRECTORIES_CREATED, 1), + with(DIRECTORIES_DELETED, 0), + withWhenKeeping(getDeleteMarkerStatistic(), 0), + withWhenKeeping(FAKE_DIRECTORIES_DELETED, 0), + withWhenDeleting(getDeleteMarkerStatistic(), + isBulkDelete() ? DELETE_MARKER_REQUEST : fakeDirectoriesToDelete), + // delete all possible fake dirs above the subdirectory + withWhenDeleting(FAKE_DIRECTORIES_DELETED, + fakeDirectoriesToDelete)); + + LOG.info("About to delete {}", parent); + // now delete the deep tree. + verifyMetrics(() -> { + fs.delete(parent, true); + return "deleting parent dir " + parent + " " + getMetricSummary(); + }, + + // keeping: the parent dir marker needs deletion alongside + // the subdir one. + withWhenKeeping(OBJECT_DELETE_OBJECTS, dirsCreated), + + // deleting: only the marker at the bottom needs deleting + withWhenDeleting(OBJECT_DELETE_OBJECTS, 1)); + + // followup with list calls to make sure all is clear. + verifyNoListing(parent); + verifyNoListing(subDir); + // now reinstate the directory, which in HADOOP-17244 hitting problems + fs.mkdirs(parent); + FileStatus[] children = fs.listStatus(parent); + Assertions.assertThat(children) + .describedAs("Children of %s", parent) + .isEmpty(); + } + + /** + * List a path, verify that there are no direct child entries. 
+ * @param path path to scan + */ + protected void verifyNoListing(final Path path) throws Exception { + intercept(FileNotFoundException.class, () -> { + FileStatus[] statuses = getFileSystem().listStatus(path); + return Arrays.deepToString(statuses); + }); + } + + @Test + public void testDirMarkersFileCreation() throws Throwable { + describe("verify cost of file creation"); + + Path srcBaseDir = dir(methodPath()); + + Path srcDir = dir(new Path(srcBaseDir, "1/2/3/4/5/6")); + + // creating a file should trigger demise of the src dir marker + // unless markers are being kept + + final int directories = directoriesInPath(srcDir); + verifyMetrics(() -> { + file(new Path(srcDir, "source.txt")); + LOG.info("Metrics: {}\n{}", getMetricSummary(), getFileSystem()); + return "after touch(fs, srcFilePath) " + getMetricSummary(); + }, + with(DIRECTORIES_CREATED, 0), + with(DIRECTORIES_DELETED, 0), + // keeping: no delete operations. + withWhenKeeping(getDeleteMarkerStatistic(), 0), + withWhenKeeping(FAKE_DIRECTORIES_DELETED, 0), + // delete all possible fake dirs above the file + withWhenDeleting(getDeleteMarkerStatistic(), + isBulkDelete() ? 1: directories), + withWhenDeleting(FAKE_DIRECTORIES_DELETED, + directories)); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java new file mode 100644 index 0000000000000..0077503e87c1a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.performance; + + +import java.util.Arrays; +import java.util.Collection; +import java.util.UUID; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileSystem; + +import static org.apache.hadoop.fs.s3a.Statistic.*; +import static org.apache.hadoop.fs.s3a.performance.OperationCost.*; +import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.probe; + +/** + * Use metrics to assert about the cost of file API calls. + *

    + * Parameterized on guarded vs raw. and directory marker keep vs delete + */ +@RunWith(Parameterized.class) +public class ITestS3ARenameCost extends AbstractS3ACostTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestS3ARenameCost.class); + + /** + * Parameterization. + */ + @Parameterized.Parameters(name = "{0}") + public static Collection params() { + return Arrays.asList(new Object[][]{ + {"raw-keep-markers", false, true, false}, + {"raw-delete-markers", false, false, false}, + {"nonauth-keep-markers", true, true, false}, + {"auth-delete-markers", true, false, true} + }); + } + + public ITestS3ARenameCost(final String name, + final boolean s3guard, + final boolean keepMarkers, + final boolean authoritative) { + super(s3guard, keepMarkers, authoritative); + } + + @Test + public void testRenameFileToDifferentDirectory() throws Throwable { + describe("rename a file to a different directory, " + + "keeping the source dir present"); + + Path baseDir = dir(methodPath()); + + Path srcDir = new Path(baseDir, "1/2/3/4/5/6"); + final Path srcFilePath = file(new Path(srcDir, "source.txt")); + + // create a new source file. + // Explicitly use a new path object to guarantee that the parent paths + // are different object instances and so equals() rather than == + // is + Path parent2 = srcFilePath.getParent(); + Path srcFile2 = file(new Path(parent2, "source2.txt")); + Assertions.assertThat(srcDir) + .isNotSameAs(parent2); + Assertions.assertThat(srcFilePath.getParent()) + .isEqualTo(srcFile2.getParent()); + + // create a directory tree, expect the dir to be created and + // possibly a request to delete all parent directories made. + Path destBaseDir = new Path(baseDir, "dest"); + Path destDir = dir(new Path(destBaseDir, "a/b/c/d")); + Path destFilePath = new Path(destDir, "dest.txt"); + + // rename the source file to the destination file. + // this tests file rename, not dir rename + // as srcFile2 exists, the parent dir of srcFilePath must not be created. + final int directoriesInPath = directoriesInPath(destDir); + verifyMetrics(() -> + execRename(srcFilePath, destFilePath), + whenRaw(RENAME_SINGLE_FILE_DIFFERENT_DIR), + with(DIRECTORIES_CREATED, 0), + with(DIRECTORIES_DELETED, 0), + // keeping: only the core delete operation is issued. + withWhenKeeping(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST), + withWhenKeeping(FAKE_DIRECTORIES_DELETED, 0), + withWhenKeeping(OBJECT_DELETE_OBJECTS, 1), + + // deleting: delete any fake marker above the destination. + // the actual request count depends on whether bulk delete is + // enabled or not + + // no bulk delete: multiple marker calls + probe(isDeleting() && !isBulkDelete(), OBJECT_DELETE_REQUEST, + DELETE_OBJECT_REQUEST + directoriesInPath), + + // bulk delete: split up + probe(isDeleting() && isBulkDelete(), OBJECT_DELETE_REQUEST, + DELETE_OBJECT_REQUEST), + probe(isDeleting() && isBulkDelete(), OBJECT_BULK_DELETE_REQUEST, + DELETE_MARKER_REQUEST), + withWhenDeleting(FAKE_DIRECTORIES_DELETED, + directoriesInPath), + withWhenDeleting(OBJECT_DELETE_OBJECTS, + directoriesInPath + 1)); + + assertIsFile(destFilePath); + assertIsDirectory(srcDir); + assertPathDoesNotExist("should have gone in the rename", srcFilePath); + } + + /** + * Same directory rename is lower cost as there's no need to + * look for the parent dir of the dest path or worry about + * deleting markers. 
+ */ + @Test + public void testRenameSameDirectory() throws Throwable { + describe("rename a file to the same directory"); + + Path baseDir = dir(methodPath()); + final Path sourceFile = file(new Path(baseDir, "source.txt")); + + // create a new source file. + // Explicitly use a new path object to guarantee that the parent paths + // are different object instances and so equals() rather than == + // is + Path parent2 = sourceFile.getParent(); + Path destFile = new Path(parent2, "dest"); + verifyMetrics(() -> + execRename(sourceFile, destFile), + whenRaw(RENAME_SINGLE_FILE_SAME_DIR), + with(OBJECT_COPY_REQUESTS, 1), + with(DIRECTORIES_CREATED, 0), + with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST), + with(FAKE_DIRECTORIES_DELETED, 0)); + } + + @Test + public void testCostOfRootFileRename() throws Throwable { + describe("assert that a root file rename doesn't" + + " do much in terms of parent dir operations"); + S3AFileSystem fs = getFileSystem(); + + // unique name, so that even when run in parallel tests, there's no conflict + String uuid = UUID.randomUUID().toString(); + Path src = file(new Path("/src-" + uuid)); + Path dest = new Path("/dest-" + uuid); + try { + verifyMetrics(() -> { + fs.rename(src, dest); + return "after fs.rename(/src,/dest) " + getMetricSummary(); + }, + whenRaw(FILE_STATUS_FILE_PROBE + .plus(GET_FILE_STATUS_FNFE) + .plus(COPY_OP)), + // here we expect there to be no fake directories + with(DIRECTORIES_CREATED, 0), + // one for the renamed file only + with(OBJECT_DELETE_REQUEST, + DELETE_OBJECT_REQUEST), + // no directories are deleted: This is root + with(DIRECTORIES_DELETED, 0), + // no fake directories are deleted: This is root + with(FAKE_DIRECTORIES_DELETED, 0), + with(FILES_DELETED, 1)); + } finally { + fs.delete(src, false); + fs.delete(dest, false); + } + } + + @Test + public void testCostOfRootFileDelete() throws Throwable { + describe("assert that a root file delete doesn't" + + " do much in terms of parent dir operations"); + S3AFileSystem fs = getFileSystem(); + + // unique name, so that even when run in parallel tests, there's no conflict + String uuid = UUID.randomUUID().toString(); + Path src = file(new Path("/src-" + uuid)); + try { + // delete that destination file, assert only the file delete was issued + verifyMetrics(() -> { + fs.delete(src, false); + return "after fs.delete(/dest) " + getMetricSummary(); + }, + with(DIRECTORIES_CREATED, 0), + with(DIRECTORIES_DELETED, 0), + with(FAKE_DIRECTORIES_DELETED, 0), + with(FILES_DELETED, 1), + with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST), + whenRaw(FILE_STATUS_FILE_PROBE)); /* no need to look at parent. */ + + } finally { + fs.delete(src, false); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java new file mode 100644 index 0000000000000..af4cfba0aa078 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCost.java @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.performance; + +/** + * Declaration of the costs of head and list calls for various FS IO + * operations. + *

    + * An instance declares the number of head and list calls expected for + * various operations -with a {@link #plus(OperationCost)} + * method to add operation costs together to produce an + * aggregate cost. These can then be validated in tests + * via {@link OperationCostValidator}. + * + */ +public final class OperationCost { + + /** Head costs for getFileStatus() directory probe: {@value}. */ + public static final int FILESTATUS_DIR_PROBE_H = 0; + + /** List costs for getFileStatus() directory probe: {@value}. */ + public static final int FILESTATUS_DIR_PROBE_L = 1; + + /** Head cost getFileStatus() file probe only. */ + public static final int FILESTATUS_FILE_PROBE_H = 1; + + /** Liast cost getFileStatus() file probe only. */ + + public static final int FILESTATUS_FILE_PROBE_L = 0; + + /** + * Delete cost when deleting an object. + */ + public static final int DELETE_OBJECT_REQUEST = 1; + + /** + * Delete cost when deleting a marker. + * Note: if bulk delete is disabled, this changes to being + * the number of directories deleted. + */ + public static final int DELETE_MARKER_REQUEST = DELETE_OBJECT_REQUEST; + + /** + * No IO takes place. + */ + public static final OperationCost NO_IO = + new OperationCost(0, 0); + + /** A HEAD operation. */ + public static final OperationCost HEAD_OPERATION = new OperationCost(1, 0); + + /** A LIST operation. */ + public static final OperationCost LIST_OPERATION = new OperationCost(0, 1); + + /** + * Cost of {@link org.apache.hadoop.fs.s3a.impl.StatusProbeEnum#DIRECTORIES}. + */ + public static final OperationCost FILE_STATUS_DIR_PROBE = LIST_OPERATION; + + /** + * Cost of {@link org.apache.hadoop.fs.s3a.impl.StatusProbeEnum#FILE}. + */ + public static final OperationCost FILE_STATUS_FILE_PROBE = HEAD_OPERATION; + + /** + * Cost of getFileStatus on root directory. + */ + public static final OperationCost ROOT_FILE_STATUS_PROBE = NO_IO; + + /** + * Cost of {@link org.apache.hadoop.fs.s3a.impl.StatusProbeEnum#ALL}. + */ + public static final OperationCost FILE_STATUS_ALL_PROBES = + FILE_STATUS_FILE_PROBE.plus(FILE_STATUS_DIR_PROBE); + + /** getFileStatus() on a file which exists. */ + public static final OperationCost GET_FILE_STATUS_ON_FILE = + FILE_STATUS_FILE_PROBE; + + /** List costs for getFileStatus() on a non-empty directory: {@value}. */ + public static final OperationCost GET_FILE_STATUS_ON_DIR = + FILE_STATUS_FILE_PROBE.plus(FILE_STATUS_DIR_PROBE); + + /** Costs for getFileStatus() on an empty directory: {@value}. */ + public static final OperationCost GET_FILE_STATUS_ON_EMPTY_DIR = + GET_FILE_STATUS_ON_DIR; + + /** getFileStatus() directory marker which exists. */ + public static final OperationCost GET_FILE_STATUS_ON_DIR_MARKER = + GET_FILE_STATUS_ON_EMPTY_DIR; + + /** getFileStatus() call which fails to find any entry. */ + public static final OperationCost GET_FILE_STATUS_FNFE = + FILE_STATUS_ALL_PROBES; + + /** listLocatedStatus always does a LIST. */ + public static final OperationCost LIST_LOCATED_STATUS_LIST_OP = + new OperationCost(0, 1); + + /** listFiles always does a LIST. */ + public static final OperationCost LIST_FILES_LIST_OP = LIST_OPERATION; + + /** listStatus always does a LIST. */ + public static final OperationCost LIST_STATUS_LIST_OP = LIST_OPERATION; + /** + * Metadata cost of a copy operation, as used during rename. + * This happens even if the store is guarded. + */ + public static final OperationCost COPY_OP = + new OperationCost(1, 0); + + /** + * Cost of renaming a file to a different directory. + *

    + * LIST on dest not found, look for dest dir, and then, at + * end of rename, whether a parent dir needs to be created. + */ + public static final OperationCost RENAME_SINGLE_FILE_DIFFERENT_DIR = + FILE_STATUS_FILE_PROBE // source file probe + .plus(GET_FILE_STATUS_FNFE) // dest does not exist + .plus(FILE_STATUS_FILE_PROBE) // parent dir of dest is not file + .plus(FILE_STATUS_DIR_PROBE) // recreate source parent dir? + .plus(COPY_OP); // metadata read on copy + + /** + * Cost of renaming a file to the same directory + *

    + * No need to look for parent directories, so only file + * existence checks and the copy. + */ + public static final OperationCost RENAME_SINGLE_FILE_SAME_DIR = + FILE_STATUS_FILE_PROBE // source file probe + .plus(GET_FILE_STATUS_FNFE) // dest must not exist + .plus(COPY_OP); // metadata read on copy + + /** + * create(overwrite = true) does not look for the file existing. + */ + public static final OperationCost CREATE_FILE_OVERWRITE = + FILE_STATUS_DIR_PROBE; + + /** + * create(overwrite = false) runs all the checks. + */ + public static final OperationCost CREATE_FILE_NO_OVERWRITE = + FILE_STATUS_ALL_PROBES; + + /** + * S3Guard in non-auth mode always attempts a single file + * status call. + */ + public static final OperationCost S3GUARD_NONAUTH_FILE_STATUS_PROBE = + FILE_STATUS_FILE_PROBE; + + /** Expected HEAD count. */ + private final int head; + + /** Expected LIST count. */ + private final int list; + + /** + * Constructor. + * @param head head requests. + * @param list list requests. + */ + public OperationCost(final int head, + final int list) { + this.head = head; + this.list = list; + } + + /** Expected HEAD count. */ + int head() { + return head; + } + + /** Expected LIST count. */ + int list() { + return list; + } + + /** + * Add to create a new cost. + * @param that the other entry + * @return cost of the combined operation. + */ + public OperationCost plus(OperationCost that) { + return new OperationCost( + head + that.head, + list + that.list); + } + + @Override + public String toString() { + return "OperationCost{" + + "head=" + head + + ", list=" + list + + '}'; + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java new file mode 100644 index 0000000000000..3f03abbabc3cc --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/OperationCostValidator.java @@ -0,0 +1,505 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.performance; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.Callable; +import java.util.stream.Collectors; + +import org.assertj.core.api.Assumptions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.metrics2.lib.MutableCounter; +import org.apache.hadoop.metrics2.lib.MutableMetric; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUEST; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_METADATA_REQUESTS; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Support for declarative assertions about operation cost. + *

    + * Usage: A builder is used to declare the set of statistics + * to be monitored in the filesystem. + *

    + * A call to {@link #exec(Callable, ExpectedProbe...)} + * executes the callable if 1+ probe is enabled; after + * invocation the probes are validated. + * The result of the callable is returned. + *

    + * A call to {@link #intercepting(Class, String, Callable, ExpectedProbe...)} + * invokes the callable if 1+ probe is enabled, expects an exception + * to be raised, and then verifies the metrics declared in the probes. + *
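    As a rough usage sketch (assuming the Statistic constants and the static probe factories are imported, and that fs and path come from the enclosing test; this is illustrative rather than code taken from the patch):

        OperationCostValidator validator = OperationCostValidator.builder(fs)
            .withMetrics(OBJECT_METADATA_REQUESTS, OBJECT_LIST_REQUEST)
            .build();
        // run getFileStatus() on a file and require exactly one HEAD, no LIST
        validator.exec(() -> fs.getFileStatus(path),
            expect(true, new OperationCost(1, 0)));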

    + * Probes are built up from the static methods which create probes
    + * for metrics:
    + * <ul>
    + *   <li>{@link #probe(boolean, Statistic, int)}</li>
    + *   <li>{@link #probe(Statistic, int)}</li>
    + *   <li>{@link #probes(boolean, ExpectedProbe...)}</li>
    + *   <li>{@link #always()}</li>
    + * </ul>
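    A small sketch of how these factories compose (the boolean deleteMarkers is an assumed local flag, not something defined in the patch):

        // unconditional: the operation must issue exactly one HEAD request
        ExpectedProbe oneHead = probe(OBJECT_METADATA_REQUESTS, 1);
        // conditional group: only evaluated when deleteMarkers is true;
        // a disabled probe never triggers execution on its own
        ExpectedProbe markerChecks = probes(deleteMarkers,
            probe(OBJECT_LIST_REQUEST, 1),
            oneHead);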
    + * If any probe evaluates to false, an assertion is raised. + *

    + * When this happens: look in the logs! + * The logs will contain the whole set of metrics, the probe details + * and the result of the call. + */ +public final class OperationCostValidator { + + private static final Logger LOG = + LoggerFactory.getLogger(OperationCostValidator.class); + + /** + * The empty probe: declared as disabled. + */ + private static final ExpectedProbe EMPTY_PROBE = + new EmptyProbe("empty", false); + + /** + * A probe which is always enabled. + */ + private static final ExpectedProbe ALWAYS_PROBE = + new EmptyProbe("always", true); + + /** + * The map of metric diffs to track. + */ + private final Map metricDiffs + = new TreeMap<>(); + + /** + * Instrumentation's IO Statistics. + */ + private final IOStatisticsStore ioStatistics; + + /** + * Build the instance. + * @param builder builder containing all options. + */ + private OperationCostValidator(Builder builder) { + S3AFileSystem fs = builder.filesystem; + S3AInstrumentation instrumentation = fs.getInstrumentation(); + for (Statistic stat : builder.metrics) { + String symbol = stat.getSymbol(); + MutableMetric metric = instrumentation.lookupMetric(symbol); + if (metric instanceof MutableCounter) { + // only counters are used in the cost tracking; + // other statistics are ignored. + metricDiffs.put(symbol, + new S3ATestUtils.MetricDiff(fs, stat)); + } + } + builder.metrics.clear(); + ioStatistics = instrumentation.getIOStatistics(); + } + + /** + * Reset all the metrics being tracked. + */ + public void resetMetricDiffs() { + metricDiffs.values().forEach(S3ATestUtils.MetricDiff::reset); + } + + /** + * Get the diff of a statistic. + * @param stat statistic to look up + * @return the value + * @throws NullPointerException if there is no match + */ + public S3ATestUtils.MetricDiff get(Statistic stat) { + S3ATestUtils.MetricDiff diff = + requireNonNull(metricDiffs.get(stat.getSymbol()), + () -> "No metric tracking for " + stat); + return diff; + } + + /** + * Execute a closure and verify the metrics. + *

    + * If no probes are active, the operation will + * raise an Assumption exception for the test to be skipped. + * @param eval closure to evaluate + * @param expected varargs list of expected diffs + * @param return type. + * @return the result of the evaluation + */ + public T exec( + Callable eval, + ExpectedProbe... expectedA) throws Exception { + List expected = Arrays.asList(expectedA); + resetMetricDiffs(); + + // verify that 1+ probe is enabled + assumeProbesEnabled(expected); + // if we get here, then yes. + // evaluate it + T r = eval.call(); + // build the text for errors + String text = + "operation returning " + + (r != null ? r.toString() : "null"); + LOG.info("{}", text); + LOG.info("state {}", this.toString()); + LOG.info("probes {}", expected); + LOG.info("IOStatistics {}", ioStatistics); + for (ExpectedProbe ed : expected) { + ed.verify(this, text); + } + return r; + } + + /** + * Scan all probes for being enabled. + *

    + * If none of them are enabled, the evaluation will be skipped. + * @param expected list of expected probes + */ + private void assumeProbesEnabled(List expected) { + boolean enabled = false; + for (ExpectedProbe ed : expected) { + enabled |= ed.isEnabled(); + } + String pstr = expected.stream() + .map(Object::toString) + .collect(Collectors.joining(", ")); + Assumptions.assumeThat(enabled) + .describedAs("metrics to probe for are not enabled in %s", pstr) + .isTrue(); + } + + /** + * Execute a closure, expecting an exception. + * Verify the metrics after the exception has been caught and + * validated. + * @param clazz type of exception + * @param text text to look for in exception (optional) + * @param eval closure to evaluate + * @param expected varargs list of expected diffs + * @param return type of closure + * @param exception type + * @return the exception caught. + * @throws Exception any other exception + */ + public E intercepting( + Class clazz, + String text, + Callable eval, + ExpectedProbe... expected) throws Exception { + + return exec(() -> + intercept(clazz, text, eval), + expected); + } + + @Override + public String toString() { + return metricDiffs.values().stream() + .map(S3ATestUtils.MetricDiff::toString) + .collect(Collectors.joining(", ")); + } + + /** + * Create a builder for the cost checker. + * + * @param fs filesystem. + * @return builder. + */ + public static Builder builder(S3AFileSystem fs) { + return new Builder(fs); + } + + /** + * builder. + */ + public static final class Builder { + + /** + * Filesystem. + */ + private final S3AFileSystem filesystem; + + /** + * Metrics to create. + */ + private final List metrics = new ArrayList<>(); + + + /** + * Create with a required filesystem. + * @param filesystem monitored filesystem + */ + public Builder(final S3AFileSystem filesystem) { + this.filesystem = requireNonNull(filesystem); + } + + + /** + * Add a single metric. + * @param statistic statistic to monitor. + * @return this + */ + public Builder withMetric(Statistic statistic) { + metrics.add(statistic); + return this; + } + + /** + * Add a varargs list of metrics. + * @param stat statistics to monitor. + * @return this. + */ + public Builder withMetrics(Statistic...stats) { + metrics.addAll(Arrays.asList(stats)); + return this; + } + + /** + * Instantiate. + * @return the validator. + */ + public OperationCostValidator build() { + return new OperationCostValidator(this); + } + } + + /** + * Get the "always" probe. + * @return a probe which always triggers execution. + */ + public static ExpectedProbe always() { + return ALWAYS_PROBE; + } + + /** + * Create a probe of a statistic which is enabled whenever the expected + * value is greater than zero. + * @param statistic statistic to check. + * @param expected expected value. + * @return a probe. + */ + public static ExpectedProbe probe( + final Statistic statistic, + final int expected) { + return probe(expected >= 0, statistic, expected); + } + + /** + * Create a probe of a statistic which is conditionally enabled. + * @param enabled is the probe enabled? + * @param statistic statistic to check. + * @param expected expected value. + * @return a probe. + */ + public static ExpectedProbe probe( + final boolean enabled, + final Statistic statistic, + final int expected) { + return enabled + ? new ExpectSingleStatistic(statistic, expected) + : EMPTY_PROBE; + } + + /** + * Create an aggregate probe from a vararges list of probes. + * @param enabled should the probes be enabled? 
+ * @param plist probe list + * @return a probe + */ + public static ExpectedProbe probes( + final boolean enabled, + final ExpectedProbe...plist) { + return enabled + ? new ProbeList(Arrays.asList(plist)) + : EMPTY_PROBE; + } + + /** + * Expect the exact head and list requests of the operation + * cost supplied. + * @param enabled is the probe enabled? + * @param cost expected cost. + * @return a probe. + */ + public static ExpectedProbe expect( + boolean enabled, OperationCost cost) { + return probes(enabled, + probe(OBJECT_METADATA_REQUESTS, cost.head()), + probe(OBJECT_LIST_REQUEST, cost.list())); + } + + /** + * An expected probe to verify given criteria to trigger an eval. + *
    + * Probes can be conditional, in which case they are only evaluated + * when true. + */ + public interface ExpectedProbe { + + /** + * Verify a diff if the FS instance is compatible. + * @param message message to print; metric name is appended + */ + void verify(OperationCostValidator diffs, String message); + + boolean isEnabled(); + } + + /** + * Simple probe is a single statistic. + */ + public static final class ExpectSingleStatistic implements ExpectedProbe { + + private final Statistic statistic; + + private final int expected; + + /** + * Create. + * @param statistic statistic + * @param expected expected value. + */ + private ExpectSingleStatistic(final Statistic statistic, + final int expected) { + this.statistic = statistic; + this.expected = expected; + } + + /** + * Verify a diff if the FS instance is compatible. + * @param message message to print; metric name is appended + */ + @Override + public void verify(OperationCostValidator diffs, String message) { + diffs.get(statistic).assertDiffEquals(message, expected); + } + + public Statistic getStatistic() { + return statistic; + } + + public int getExpected() { + return expected; + } + + @Override + public boolean isEnabled() { + return true; + } + + @Override + public String toString() { + String sb = "ExpectSingleStatistic{" + + statistic + + ", expected=" + expected + + ", enabled=" + isEnabled() + + '}'; + return sb; + } + } + + /** + * A list of probes; the verify operation + * verifies them all. + */ + public static class ProbeList implements ExpectedProbe { + + /** + * Probe list. + */ + private final List probes; + + /** + * Constructor. + * @param probes probe list. + */ + public ProbeList(final List probes) { + this.probes = probes; + } + + @Override + public void verify(final OperationCostValidator diffs, + final String message) { + probes.forEach(p -> p.verify(diffs, message)); + } + + /** + * Enabled if 1+ probe is enabled. + * @return true if enabled. + */ + @Override + public boolean isEnabled() { + boolean enabled = false; + for (ExpectedProbe probe : probes) { + enabled |= probe.isEnabled(); + } + return enabled; + } + + @Override + public String toString() { + String pstr = probes.stream() + .map(Object::toString) + .collect(Collectors.joining(", ")); + return "ProbeList{" + pstr + '}'; + } + } + + /** + * The empty probe always runs; it can be used to force + * a verification to execute. 
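Putting the probe factories and the builder together, a cost assertion in a test would presumably look like the rough sketch below; "fs" (a configured S3AFileSystem), "testPath" and the expected counts are illustrative assumptions, while builder(), exec(), probe() and always() are the methods declared in this class.

    // illustrative sketch: "fs" and "testPath" are assumed to exist in the test
    OperationCostValidator validator = OperationCostValidator.builder(fs)
        .withMetrics(OBJECT_METADATA_REQUESTS, OBJECT_LIST_REQUEST)
        .build();

    // evaluate the closure, then assert on the per-statistic deltas;
    // if none of the probes are enabled, the test is skipped via an Assumption
    validator.exec(() -> fs.getFileStatus(testPath),
        probe(OBJECT_METADATA_REQUESTS, 1),
        probe(OBJECT_LIST_REQUEST, 0));

    // always() forces the closure to run even when no statistic probe is active
    validator.exec(() -> fs.getFileStatus(testPath), always());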
+ */ + private static final class EmptyProbe implements ExpectedProbe { + + private final String name; + + private final boolean enabled; + + private EmptyProbe(final String name, boolean enabled) { + this.name = name; + this.enabled = enabled; + } + + @Override + public void verify(final OperationCostValidator diffs, + final String message) { + } + + @Override + public boolean isEnabled() { + return enabled; + } + + @Override + public String toString() { + return name; + } + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java index 3fda0eb0144a2..dddd84b9103be 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java @@ -37,7 +37,7 @@ import org.apache.hadoop.fs.s3a.S3AUtils; import org.apache.hadoop.fs.s3a.UnknownStoreException; import org.apache.hadoop.util.StopWatch; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileSystem; import org.junit.Test; @@ -49,22 +49,23 @@ import org.apache.hadoop.fs.s3a.S3AFileStatus; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; -import org.apache.hadoop.fs.s3a.commit.CommitConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; -import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_REGION_KEY; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_CREATE_KEY; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL; import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption; -import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_BAD_STATE; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.BucketInfo.IS_MARKER_AWARE; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.INVALID_ARGUMENT; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.exec; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommand; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.MARKERS; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_ACCEPTABLE; import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** @@ -125,7 +126,7 @@ protected static void expectResult(int expected, public static String expectSuccess( String message, S3GuardTool tool, - String... args) throws Exception { + Object... args) throws Exception { ByteArrayOutputStream buf = new ByteArrayOutputStream(); exec(SUCCESS, message, tool, buf, args); return buf.toString(); @@ -138,9 +139,9 @@ public static String expectSuccess( * @return the return code * @throws Exception any exception */ - protected int run(Configuration conf, String... args) + protected int run(Configuration conf, Object... 
args) throws Exception { - return S3GuardTool.run(conf, args); + return runS3GuardCommand(conf, args); } /** @@ -150,8 +151,8 @@ protected int run(Configuration conf, String... args) * @return the return code * @throws Exception any exception */ - protected int run(String... args) throws Exception { - return S3GuardTool.run(getConfiguration(), args); + protected int run(Object... args) throws Exception { + return runS3GuardCommand(getConfiguration(), args); } /** @@ -161,11 +162,12 @@ protected int run(String... args) throws Exception { * @param args argument list * @throws Exception any exception */ - protected void runToFailure(int status, String... args) + protected void runToFailure(int status, Object... args) throws Exception { + final Configuration conf = getConfiguration(); ExitUtil.ExitException ex = - intercept(ExitUtil.ExitException.class, - () -> run(args)); + intercept(ExitUtil.ExitException.class, () -> + runS3GuardCommand(conf, args)); if (ex.status != status) { throw ex; } @@ -188,7 +190,7 @@ public void setup() throws Exception { conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); URI fsUri = fs.getUri(); S3AUtils.setBucketOption(conf,fsUri.getHost(), - METADATASTORE_AUTHORITATIVE, + S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); rawFs = (S3AFileSystem) FileSystem.newInstance(fsUri, conf); } @@ -446,6 +448,44 @@ public void testBucketInfoUnguarded() throws Exception { info.contains("S3A Client")); } + /** + * Verify that the {@code -markers aware} option works. + * This test case is in this class for ease of backporting. + */ + @Test + public void testBucketInfoMarkerAware() throws Throwable { + final Configuration conf = getConfiguration(); + URI fsUri = getFileSystem().getUri(); + + // run a bucket info command and look for + // confirmation that it got the output from DDB diags + S3GuardTool.BucketInfo infocmd = toClose(new S3GuardTool.BucketInfo(conf)); + String info = exec(infocmd, S3GuardTool.BucketInfo.NAME, + "-" + MARKERS, S3GuardTool.BucketInfo.MARKERS_AWARE, + fsUri.toString()); + + assertTrue("Output should contain information about S3A client " + info, + info.contains(IS_MARKER_AWARE)); + } + + /** + * Verify that the {@code -markers} option fails on unknown options. + * This test case is in this class for ease of backporting. 
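A minimal sketch of the same negative check, written against the runS3GuardCommandToFailure helper this patch adds to S3GuardToolTestHelper; conf and fsUri are assumed to be initialised exactly as in the test body that follows.

    // expect bucket-info to reject an unknown -markers value with EXIT_NOT_ACCEPTABLE
    runS3GuardCommandToFailure(conf, EXIT_NOT_ACCEPTABLE,
        S3GuardTool.BucketInfo.NAME,
        "-" + MARKERS, "unknown",
        fsUri);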
+ */ + @Test + public void testBucketInfoMarkerPolicyUnknown() throws Throwable { + final Configuration conf = getConfiguration(); + URI fsUri = getFileSystem().getUri(); + + // run a bucket info command and look for + // confirmation that it got the output from DDB diags + S3GuardTool.BucketInfo infocmd = toClose(new S3GuardTool.BucketInfo(conf)); + intercept(ExitUtil.ExitException.class, ""+ EXIT_NOT_ACCEPTABLE, () -> + exec(infocmd, S3GuardTool.BucketInfo.NAME, + "-" + MARKERS, "unknown", + fsUri.toString())); + } + @Test public void testSetCapacityFailFastIfNotGuarded() throws Exception{ Configuration conf = getConfiguration(); @@ -548,16 +588,8 @@ public void testProbeForMagic() throws Throwable { String name = fs.getUri().toString(); S3GuardTool.BucketInfo cmd = new S3GuardTool.BucketInfo( getConfiguration()); - if (fs.hasPathCapability(fs.getWorkingDirectory(), - CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER)) { - // if the FS is magic, expect this to work + // this must always work exec(cmd, S3GuardTool.BucketInfo.MAGIC_FLAG, name); - } else { - // if the FS isn't magic, expect the probe to fail - assertExitCode(E_BAD_STATE, - intercept(ExitUtil.ExitException.class, - () -> exec(cmd, S3GuardTool.BucketInfo.MAGIC_FLAG, name))); - } } /** @@ -655,4 +687,5 @@ public void testInitFailsIfNoBucketNameOrDDBTableSet() throws Exception { assertEquals("Mismatched s3 outputs: " + actualOut, filesOnS3, actualOnS3); assertFalse("Diff contained duplicates", duplicates); } + } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java index fc81c8d854109..580386a09f6b0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java @@ -46,7 +46,7 @@ import com.amazonaws.services.dynamodbv2.model.Tag; import com.amazonaws.services.dynamodbv2.model.TagResourceRequest; import com.amazonaws.services.dynamodbv2.model.UntagResourceRequest; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.assertj.core.api.Assertions; import org.apache.commons.collections.CollectionUtils; @@ -76,7 +76,7 @@ import org.apache.hadoop.security.UserGroupInformation; import static java.lang.String.valueOf; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.s3a.S3AUtils.clearBucketOption; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreAuthoritativeMode.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreAuthoritativeMode.java index aa2b4e77bd4bb..3d9715ceb3ebb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreAuthoritativeMode.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreAuthoritativeMode.java @@ -54,7 +54,7 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; 
import static org.apache.hadoop.fs.s3a.S3AUtils.applyLocatedFiles; -import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUESTS; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUEST; import static org.apache.hadoop.fs.s3a.Statistic.S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED; import static org.apache.hadoop.fs.s3a.Statistic.S3GUARD_METADATASTORE_RECORD_WRITES; import static org.apache.hadoop.fs.s3a.s3guard.AuthoritativeAuditOperation.ERROR_PATH_NOT_AUTH_IN_FS; @@ -330,7 +330,7 @@ public void testListFilesRecursiveWhenAllListingsAreAuthoritative() touchFile(nestedFile2); S3ATestUtils.MetricDiff objListRequests = - new S3ATestUtils.MetricDiff(authFS, OBJECT_LIST_REQUESTS); + new S3ATestUtils.MetricDiff(authFS, OBJECT_LIST_REQUEST); RemoteIterator statusIterator = authFS.listFiles(dir, true); @@ -372,7 +372,7 @@ public void testListFilesRecursiveWhenSomePathsAreNotAuthoritative() touchFile(nestedFile2); S3ATestUtils.MetricDiff objListRequests = - new S3ATestUtils.MetricDiff(authFS, OBJECT_LIST_REQUESTS); + new S3ATestUtils.MetricDiff(authFS, OBJECT_LIST_REQUEST); RemoteIterator statusIterator = authFS.listFiles(dir, true); @@ -766,7 +766,7 @@ private T expectAuthoritativeUpdate( S3ATestUtils.MetricDiff authDirsMarked = new S3ATestUtils.MetricDiff(authFS, S3GUARD_METADATASTORE_AUTHORITATIVE_DIRECTORIES_UPDATED); S3ATestUtils.MetricDiff listRequests = new S3ATestUtils.MetricDiff(authFS, - OBJECT_LIST_REQUESTS); + OBJECT_LIST_REQUEST); final T call = fn.call(); authDirsMarked.assertDiffEquals(updates); listRequests.assertDiffEquals(lists); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java index 75b630ae22a4b..0f6800f473517 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStoreScale.java @@ -21,6 +21,7 @@ import javax.annotation.Nullable; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -49,7 +50,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.fs.impl.WrappedIOException; import org.apache.hadoop.fs.s3a.AWSServiceThrottledException; import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AFileStatus; @@ -61,7 +61,7 @@ import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.DurationInfo; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.s3guard.MetadataStoreTestBase.basicFileStatus; import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.PARENT; @@ -532,7 +532,7 @@ public void test_999_delete_all_entries() throws Throwable { LOG.info("Deleting {}", p); list.add(p); }); - } catch (WrappedIOException e) { + } catch (UncheckedIOException e) { // the iterator may have overloaded; swallow if so. 
if (!(e.getCause() instanceof AWSServiceThrottledException)) { throw e; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java index b2e6b3e93a8b3..f03d4e0924775 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardDDBRootOperations.java @@ -37,10 +37,13 @@ import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.impl.StoreContext; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; import static org.apache.hadoop.fs.s3a.Constants.ENABLE_MULTI_DELETE; import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY; import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBucketOverrides; import static org.apache.hadoop.fs.s3a.S3AUtils.applyLocatedFiles; @@ -52,6 +55,8 @@ * integration tests. *
    * The tests only run if DynamoDB is the metastore. + *
    + * The marker policy is fixed to "delete" */ @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class ITestS3GuardDDBRootOperations extends AbstractS3ATestBase { @@ -82,9 +87,15 @@ protected int getTestTimeoutMillis() { protected Configuration createConfiguration() { Configuration conf = super.createConfiguration(); String bucketName = getTestBucketName(conf); + disableFilesystemCaching(conf); + removeBucketOverrides(bucketName, conf, + S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, + ENABLE_MULTI_DELETE, + DIRECTORY_MARKER_POLICY); + conf.set(DIRECTORY_MARKER_POLICY, + DIRECTORY_MARKER_POLICY_DELETE); // set a sleep time of 0 on pruning, for speedier test runs. - removeBucketOverrides(bucketName, conf, ENABLE_MULTI_DELETE); conf.setTimeDuration( S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY, 0, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java index 47551f3374fd8..06148857ea7d1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStoreTestBase.java @@ -48,7 +48,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.HadoopTestBase; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.S3ATestUtils.metadataStorePersistsAuthoritativeBit; /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardToolTestHelper.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardToolTestHelper.java index 4a5e55eb61e3c..89b4051de8776 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardToolTestHelper.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardToolTestHelper.java @@ -20,12 +20,16 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.util.Arrays; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ExitCodeProvider; +import org.apache.hadoop.util.ExitUtil; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; /** @@ -48,7 +52,7 @@ private S3GuardToolTestHelper() { * @param args argument list * @throws Exception on any failure */ - public static String exec(S3GuardTool cmd, String... args) throws Exception { + public static String exec(S3GuardTool cmd, Object... args) throws Exception { return expectExecResult(0, cmd, args); } @@ -64,7 +68,7 @@ public static String exec(S3GuardTool cmd, String... args) throws Exception { public static String expectExecResult( final int expectedResult, final S3GuardTool cmd, - final String... args) throws Exception { + final Object... args) throws Exception { ByteArrayOutputStream buf = new ByteArrayOutputStream(); try { exec(expectedResult, "", cmd, buf, args); @@ -77,6 +81,17 @@ public static String expectExecResult( } } + /** + * Given an array of objects, conver to an array of strings. 
+ * @param oargs object args + * @return string equivalent + */ + public static String[] varargsToString(final Object[] oargs) { + return Arrays.stream(oargs) + .map(Object::toString) + .toArray(String[]::new); + } + /** * Execute a command, saving the output into the buffer. * @param expectedResult expected result of the command. @@ -91,8 +106,9 @@ public static void exec(final int expectedResult, final String errorText, final S3GuardTool cmd, final ByteArrayOutputStream buf, - final String... args) + final Object... oargs) throws Exception { + final String[] args = varargsToString(oargs); LOG.info("exec {}", (Object) args); int r; try (PrintStream out = new PrintStream(buf)) { @@ -116,4 +132,43 @@ public static void exec(final int expectedResult, } } + /** + * Run a S3GuardTool command from a varags list. + *
    + * Warning: if the filesystem is retrieved from the cache, + * it will be closed afterwards. + * @param conf configuration + * @param args argument list + * @return the return code + * @throws Exception any exception + */ + public static int runS3GuardCommand(Configuration conf, Object... args) + throws Exception { + return S3GuardTool.run(conf, varargsToString(args)); + } + + /** + * Run a S3GuardTool command from a varags list, catch any raised + * ExitException and verify the status code matches that expected. + * @param conf configuration + * @param status expected status code of the exception + * @param args argument list + * @throws Exception any exception + */ + public static void runS3GuardCommandToFailure(Configuration conf, + int status, + Object... args) throws Exception { + + ExitUtil.ExitException ex = + intercept(ExitUtil.ExitException.class, + () -> { + int ec = runS3GuardCommand(conf, args); + if (ec != 0) { + throw new ExitUtil.ExitException(ec, "exit code " + ec); + } + }); + if (ex.status != status) { + throw ex; + } + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDirListingMetadata.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDirListingMetadata.java index 1ce3ee56ce0a5..5f7a6fbd072df 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDirListingMetadata.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestDirListingMetadata.java @@ -316,18 +316,23 @@ public void testRemoveExpiredEntriesFromListing() { List listing = Arrays.asList(pathMeta1, pathMeta2, pathMeta3); DirListingMetadata meta = new DirListingMetadata(path, listing, false); - meta.removeExpiredEntriesFromListing(ttl, now); + List expired = meta.removeExpiredEntriesFromListing(ttl, + now); Assertions.assertThat(meta.getListing()) .describedAs("Metadata listing for %s", path) .doesNotContain(pathMeta1) .contains(pathMeta2) .contains(pathMeta3); + Assertions.assertThat(expired) + .describedAs("Expire entries underr %s", path) + .doesNotContain(pathMeta2) + .contains(pathMeta1); } - /* + /** * Create DirListingMetadata with two dirs and one file living in directory - * 'parent' + * 'parent'. 
*/ private static DirListingMetadata makeTwoDirsOneFile(Path parent) { PathMetadata pathMeta1 = new PathMetadata( diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java index 251d7aa3675f3..8df61dbe1a836 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestLocalMetadataStore.java @@ -22,9 +22,9 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; -import com.google.common.base.Ticker; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Ticker; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.junit.Assume; import org.junit.Test; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathMetadataDynamoDBTranslation.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathMetadataDynamoDBTranslation.java index 271957ad51669..f2d27f333b03e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathMetadataDynamoDBTranslation.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathMetadataDynamoDBTranslation.java @@ -29,7 +29,7 @@ import com.amazonaws.services.dynamodbv2.document.PrimaryKey; import com.amazonaws.services.dynamodbv2.model.AttributeDefinition; import com.amazonaws.services.dynamodbv2.model.KeySchemaElement; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.junit.After; import org.junit.Assert; import org.junit.Rule; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java index 9a3db1ab21a5d..9b8e5918efee1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestPathOrderComparators.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path; -import static com.google.common.collect.Lists.newArrayList; +import static org.apache.hadoop.thirdparty.com.google.common.collect.Lists.newArrayList; import static org.apache.hadoop.fs.s3a.s3guard.PathOrderComparators.TOPMOST_PATH_FIRST; import static org.apache.hadoop.fs.s3a.s3guard.PathOrderComparators.TOPMOST_PATH_LAST; import static org.assertj.core.api.Assertions.assertThat; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java index 8f97179155c0d..eaa363bbf19b9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestS3Guard.java @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.UUID; @@ -39,12 +40,18 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3AUtils; import org.apache.hadoop.fs.s3a.Tristate; +import org.apache.hadoop.service.launcher.LauncherExitCodes; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.ExitUtil; import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_METADATASTORE_METADATA_TTL; import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_METADATA_TTL; +import static org.apache.hadoop.fs.s3a.Listing.toProvidedFileStatusIterator; +import static org.apache.hadoop.fs.s3a.s3guard.S3Guard.dirMetaToStatuses; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -77,7 +84,6 @@ public void setUp() throws Exception { ms.initialize(conf, new S3Guard.TtlTimeProvider(conf)); timeProvider = new S3Guard.TtlTimeProvider( DEFAULT_METADATASTORE_METADATA_TTL); - } @After @@ -106,9 +112,14 @@ public void testDirListingUnionNonauth() throws Exception { List s3Listing = Arrays.asList( s1Status, s2Status); - - FileStatus[] result = S3Guard.dirListingUnion(ms, DIR_PATH, s3Listing, - dirMeta, false, timeProvider); + RemoteIterator storeItr = toProvidedFileStatusIterator( + s3Listing.toArray(new S3AFileStatus[0])); + RemoteIterator resultItr = S3Guard.dirListingUnion( + ms, DIR_PATH, storeItr, dirMeta, false, + timeProvider, s3AFileStatuses -> + toProvidedFileStatusIterator(dirMetaToStatuses(dirMeta))); + S3AFileStatus[] result = S3AUtils.iteratorToStatuses( + resultItr, new HashSet<>()); assertEquals("listing length", 4, result.length); assertContainsPaths(result, MS_FILE_1, MS_FILE_2, S3_FILE_3, S3_DIR_4); @@ -122,9 +133,18 @@ public void testDirListingUnionNonauth() throws Exception { S3AFileStatus f1Status2 = new S3AFileStatus( 200, System.currentTimeMillis(), new Path(MS_FILE_1), 1, null, "tag2", "ver2"); - FileStatus[] result2 = S3Guard.dirListingUnion(ms, DIR_PATH, - Arrays.asList(f1Status2), - dirMeta, false, timeProvider); + S3AFileStatus[] f1Statuses = new S3AFileStatus[1]; + f1Statuses[0] = f1Status2; + RemoteIterator itr = toProvidedFileStatusIterator( + f1Statuses); + FileStatus[] result2 = S3AUtils.iteratorToStatuses( + S3Guard.dirListingUnion( + ms, DIR_PATH, itr, dirMeta, + false, timeProvider, + s3AFileStatuses -> + toProvidedFileStatusIterator( + dirMetaToStatuses(dirMeta))), + new HashSet<>()); // the listing returns the new status Assertions.assertThat(find(result2, MS_FILE_1)) .describedAs("Entry in listing results for %s", MS_FILE_1) @@ -157,9 +177,18 @@ public void testDirListingUnionAuth() throws Exception { ITtlTimeProvider timeProvider = new S3Guard.TtlTimeProvider( DEFAULT_METADATASTORE_METADATA_TTL); - FileStatus[] result = S3Guard.dirListingUnion(ms, DIR_PATH, s3Listing, - dirMeta, true, timeProvider); + RemoteIterator storeItr = toProvidedFileStatusIterator( + s3Listing.toArray(new S3AFileStatus[0])); + RemoteIterator resultItr = S3Guard + .dirListingUnion(ms, DIR_PATH, storeItr, dirMeta, + true, timeProvider, + s3AFileStatuses -> + toProvidedFileStatusIterator( + dirMetaToStatuses(dirMeta))); + + S3AFileStatus[] result = S3AUtils.iteratorToStatuses( + resultItr, new HashSet<>()); assertEquals("listing length", 4, result.length); assertContainsPaths(result, MS_FILE_1, MS_FILE_2, S3_FILE_3, S3_DIR_4); @@ -179,13 +208,21 @@ public void testDirListingUnionAuth() throws Exception 
{ S3AFileStatus s1Status2 = new S3AFileStatus( 200, System.currentTimeMillis(), new Path(S3_FILE_3), 1, null, "tag2", "ver2"); + S3AFileStatus[] f1Statuses = new S3AFileStatus[1]; + f1Statuses[0] = s1Status2; + RemoteIterator itr = + toProvidedFileStatusIterator(f1Statuses); + FileStatus[] result2 = S3AUtils.iteratorToStatuses( + S3Guard.dirListingUnion(ms, DIR_PATH, itr, dirMeta, + true, timeProvider, + s3AFileStatuses -> + toProvidedFileStatusIterator( + dirMetaToStatuses(dirMeta))), + new HashSet<>()); // but the result of the listing contains the old entry // because auth mode doesn't pick up changes in S3 which // didn't go through s3guard - FileStatus[] result2 = S3Guard.dirListingUnion(ms, DIR_PATH, - Arrays.asList(s1Status2), - dirMeta2, true, timeProvider); Assertions.assertThat(find(result2, S3_FILE_3)) .describedAs("Entry in listing results for %s", S3_FILE_3) .isSameAs(file3Meta.getFileStatus()); @@ -429,11 +466,17 @@ public void testLogS3GuardDisabled() throws Exception { S3Guard.DisabledWarnLevel.WARN.toString() .toLowerCase(Locale.US), "bucket"); - LambdaTestUtils.intercept(UnsupportedOperationException.class, - S3Guard.DISABLED_LOG_MSG, () -> S3Guard.logS3GuardDisabled( + ExitUtil.ExitException ex = LambdaTestUtils.intercept( + ExitUtil.ExitException.class, + String.format(S3Guard.DISABLED_LOG_MSG, "bucket"), + () -> S3Guard.logS3GuardDisabled( localLogger, S3Guard.DisabledWarnLevel.FAIL.toString(), "bucket")); + if (ex.getExitCode() != LauncherExitCodes.EXIT_BAD_CONFIGURATION) { + throw ex; + } LambdaTestUtils.intercept(IllegalArgumentException.class, - S3Guard.UNKNOWN_WARN_LEVEL, () -> S3Guard.logS3GuardDisabled( + S3Guard.UNKNOWN_WARN_LEVEL, + () -> S3Guard.logS3GuardDisabled( localLogger, "FOO_BAR_LEVEL", "bucket")); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java index 315d1fe7285be..2b3043f39ac73 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java @@ -25,8 +25,7 @@ import com.amazonaws.event.ProgressEvent; import com.amazonaws.event.ProgressEventType; import com.amazonaws.event.ProgressListener; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.assertj.core.api.Assertions; import org.junit.FixMethodOrder; import org.junit.Test; import org.junit.runners.MethodSorters; @@ -36,17 +35,24 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; +import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.util.Progressable; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static 
org.apache.hadoop.fs.s3a.Statistic.STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupCounterStatistic; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticGaugeValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.demandStringifyIOStatistics; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; /** * Scale test which creates a huge file. @@ -103,7 +109,7 @@ protected Configuration createScaleConfiguration() { KEY_HUGE_PARTITION_SIZE, DEFAULT_HUGE_PARTITION_SIZE); assertTrue("Partition size too small: " + partitionSize, - partitionSize > MULTIPART_MIN_SIZE); + partitionSize >= MULTIPART_MIN_SIZE); conf.setLong(SOCKET_SEND_BUFFER, _1MB); conf.setLong(SOCKET_RECV_BUFFER, _1MB); conf.setLong(MIN_MULTIPART_THRESHOLD, partitionSize); @@ -163,14 +169,15 @@ public void test_010_CreateHugeFile() throws IOException { // there's lots of logging here, so that a tail -f on the output log // can give a view of what is happening. S3AFileSystem fs = getFileSystem(); - StorageStatistics storageStatistics = fs.getStorageStatistics(); + IOStatistics iostats = fs.getIOStatistics(); + String putRequests = Statistic.OBJECT_PUT_REQUESTS.getSymbol(); String putBytes = Statistic.OBJECT_PUT_BYTES.getSymbol(); Statistic putRequestsActive = Statistic.OBJECT_PUT_REQUESTS_ACTIVE; Statistic putBytesPending = Statistic.OBJECT_PUT_BYTES_PENDING; ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - S3AInstrumentation.OutputStreamStatistics streamStatistics; + BlockOutputStreamStatistics streamStatistics; long blocksPer10MB = blocksPerMB * 10; ProgressCallback progress = new ProgressCallback(timer); try (FSDataOutputStream out = fs.create(fileToCreate, @@ -199,9 +206,9 @@ public void test_010_CreateHugeFile() throws IOException { percentage, writtenMB, filesizeMB, - storageStatistics.getLong(putBytes), + iostats.counters().get(putBytes), gaugeValue(putBytesPending), - storageStatistics.getLong(putRequests), + iostats.counters().get(putRequests), gaugeValue(putRequestsActive), elapsedTime, writtenMB / elapsedTime)); @@ -221,20 +228,32 @@ public void test_010_CreateHugeFile() throws IOException { logFSState(); bandwidth(timer, filesize); LOG.info("Statistics after stream closed: {}", streamStatistics); - long putRequestCount = storageStatistics.getLong(putRequests); - Long putByteCount = storageStatistics.getLong(putBytes); + + LOG.info("IOStatistics after upload: {}", + demandStringifyIOStatistics(iostats)); + long putRequestCount = lookupCounterStatistic(iostats, putRequests); + long putByteCount = lookupCounterStatistic(iostats, putBytes); + Assertions.assertThat(putRequestCount) + .describedAs("Put request count from filesystem stats %s", + iostats) + .isGreaterThan(0); + Assertions.assertThat(putByteCount) + .describedAs("%s count from filesystem stats %s", + putBytes, iostats) + .isGreaterThan(0); LOG.info("PUT {} bytes in {} operations; {} MB/operation", putByteCount, putRequestCount, putByteCount / (putRequestCount * _1MB)); LOG.info("Time per PUT {} nS", toHuman(timer.nanosPerOperation(putRequestCount))); - assertEquals("active put requests in \n" + fs, - 0, gaugeValue(putRequestsActive)); + verifyStatisticGaugeValue(iostats, putRequestsActive.getSymbol(), 0); + verifyStatisticGaugeValue(iostats, + STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING.getSymbol(), 0); progress.verifyNoFailures( "Put file " + fileToCreate + " of size " + filesize); if 
(streamStatistics != null) { assertEquals("actively allocated blocks in " + streamStatistics, - 0, streamStatistics.blocksActivelyAllocated()); + 0, streamStatistics.getBlocksActivelyAllocated()); } } @@ -541,12 +560,7 @@ public void test_800_DeleteHugeFiles() throws IOException { */ @Test public void test_900_dumpStats() { - StringBuilder sb = new StringBuilder(); - - getFileSystem().getStorageStatistics() - .forEach(kv -> sb.append(kv.toString()).append("\n")); - - LOG.info("Statistics\n{}", sb); + LOG.info("Statistics\n{}", ioStatisticsSourceToString(getFileSystem())); } protected void deleteHugeFile() throws IOException { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java index a1d5c46159aaa..3fa288417c4d3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java @@ -30,8 +30,8 @@ import java.util.concurrent.ExecutorService; import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.FixMethodOrder; @@ -45,8 +45,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.fs.impl.FunctionsRaisingIOE; -import org.apache.hadoop.fs.impl.WrappedIOException; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.auth.delegation.Csvout; @@ -316,15 +314,6 @@ private List buildDeleteRequest( return request; } - - private R wrap(FunctionsRaisingIOE.CallableRaisingIOE callable) { - try { - return callable.apply(); - } catch (IOException e) { - throw new WrappedIOException(e); - } - } - /** * Outcome of one of the load operations. 
*/ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java index 03f1e220b504d..0751959bbdc74 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java @@ -18,18 +18,45 @@ package org.apache.hadoop.fs.s3a.scale; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.statistics.IOStatistics; + import org.junit.Test; +import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL; import static org.apache.hadoop.fs.s3a.Statistic.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupCounterStatistic; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_CONTINUE_LIST_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; /** * Test the performance of listing files/directories. 
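The multi-page listing test added in the hunk below uploads 1000 small objects and lists them with a page size of 10, so the continuation-request count it asserts on follows directly from the paging arithmetic (a sketch using the test's own variable names):

    // numOfPutRequests = 1000, batchSize = 10 (applied via MAX_PAGING_KEYS)
    //   pages needed                  : 1000 / 10 = 100
    //   OBJECT_LIST_REQUEST           : 1 (the initial listing call)
    //   OBJECT_CONTINUE_LIST_REQUEST  : 100 - 1 = 99
    int expectedContinuations = numOfPutRequests / batchSize - 1;  // 99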
@@ -51,7 +78,7 @@ public void testListOperations() throws Throwable { int depth = scale; int files = scale; MetricDiff metadataRequests = new MetricDiff(fs, OBJECT_METADATA_REQUESTS); - MetricDiff listRequests = new MetricDiff(fs, OBJECT_LIST_REQUESTS); + MetricDiff listRequests = new MetricDiff(fs, Statistic.OBJECT_LIST_REQUEST); MetricDiff listContinueRequests = new MetricDiff(fs, OBJECT_CONTINUE_LIST_REQUESTS); MetricDiff listStatusCalls = new MetricDiff(fs, INVOCATION_LIST_FILES); @@ -137,6 +164,132 @@ public void testListOperations() throws Throwable { } } + @Test + public void testMultiPagesListingPerformanceAndCorrectness() + throws Throwable { + describe("Check performance and correctness for multi page listing " + + "using different listing api"); + final Path dir = methodPath(); + final int batchSize = 10; + final int numOfPutRequests = 1000; + final int eachFileProcessingTime = 10; + final int numOfPutThreads = 50; + Assertions.assertThat(numOfPutRequests % batchSize) + .describedAs("Files put %d must be a multiple of list batch size %d", + numOfPutRequests, batchSize) + .isEqualTo(0); + final Configuration conf = + getConfigurationWithConfiguredBatchSize(batchSize); + removeBaseAndBucketOverrides(conf, S3_METADATA_STORE_IMPL); + final InputStream im = new InputStream() { + @Override + public int read() throws IOException { + return -1; + } + }; + final List originalListOfFiles = new ArrayList<>(); + List> putObjectRequests = new ArrayList<>(); + ExecutorService executorService = Executors + .newFixedThreadPool(numOfPutThreads); + + NanoTimer uploadTimer = new NanoTimer(); + S3AFileSystem fs = (S3AFileSystem) FileSystem.get(dir.toUri(), conf); + try { + assume("Test is only for raw fs", !fs.hasMetadataStore()); + fs.create(dir); + for (int i=0; i + fs.getWriteOperationHelper().putObject(put)); + } + executorService.invokeAll(putObjectRequests); + uploadTimer.end("uploading %d files with a parallelism of %d", + numOfPutRequests, numOfPutThreads); + + RemoteIterator resIterator = fs.listFiles(dir, true); + List listUsingListFiles = new ArrayList<>(); + NanoTimer timeUsingListFiles = new NanoTimer(); + while(resIterator.hasNext()) { + listUsingListFiles.add(resIterator.next().getPath().toString()); + Thread.sleep(eachFileProcessingTime); + } + timeUsingListFiles.end("listing %d files using listFiles() api with " + + "batch size of %d including %dms of processing time" + + " for each file", + numOfPutRequests, batchSize, eachFileProcessingTime); + + Assertions.assertThat(listUsingListFiles) + .describedAs("Listing results using listFiles() must" + + "match with original list of files") + .hasSameElementsAs(originalListOfFiles) + .hasSize(numOfPutRequests); + List listUsingListStatus = new ArrayList<>(); + NanoTimer timeUsingListStatus = new NanoTimer(); + FileStatus[] fileStatuses = fs.listStatus(dir); + for(FileStatus fileStatus : fileStatuses) { + listUsingListStatus.add(fileStatus.getPath().toString()); + Thread.sleep(eachFileProcessingTime); + } + timeUsingListStatus.end("listing %d files using listStatus() api with " + + "batch size of %d including %dms of processing time" + + " for each file", + numOfPutRequests, batchSize, eachFileProcessingTime); + Assertions.assertThat(listUsingListStatus) + .describedAs("Listing results using listStatus() must" + + "match with original list of files") + .hasSameElementsAs(originalListOfFiles) + .hasSize(numOfPutRequests); + // Validate listing using listStatusIterator(). 
+ NanoTimer timeUsingListStatusItr = new NanoTimer(); + RemoteIterator lsItr = fs.listStatusIterator(dir); + List listUsingListStatusItr = new ArrayList<>(); + while (lsItr.hasNext()) { + listUsingListStatusItr.add(lsItr.next().getPath().toString()); + Thread.sleep(eachFileProcessingTime); + } + timeUsingListStatusItr.end("listing %d files using " + + "listStatusIterator() api with batch size of %d " + + "including %dms of processing time for each file", + numOfPutRequests, batchSize, eachFileProcessingTime); + Assertions.assertThat(listUsingListStatusItr) + .describedAs("Listing results using listStatusIterator() must" + + "match with original list of files") + .hasSameElementsAs(originalListOfFiles) + .hasSize(numOfPutRequests); + // now validate the statistics returned by the listing + // to be non-null and containing list and continue counters. + IOStatistics lsStats = retrieveIOStatistics(lsItr); + String statsReport = ioStatisticsToString(lsStats); + LOG.info("Listing Statistics: {}", statsReport); + verifyStatisticCounterValue(lsStats, OBJECT_LIST_REQUEST, 1); + long continuations = lookupCounterStatistic(lsStats, + OBJECT_CONTINUE_LIST_REQUEST); + // calculate expected #of continuations + int expectedContinuations = numOfPutRequests / batchSize -1; + Assertions.assertThat(continuations) + .describedAs("%s in %s", OBJECT_CONTINUE_LIST_REQUEST, statsReport) + .isEqualTo(expectedContinuations); + } finally { + executorService.shutdown(); + // delete in this FS so S3Guard is left out of it. + fs.delete(dir, true); + fs.close(); + } + } + + private Configuration getConfigurationWithConfiguredBatchSize(int batchSize) { + Configuration conf = new Configuration(getFileSystem().getConf()); + S3ATestUtils.disableFilesystemCaching(conf); + conf.setInt(Constants.MAX_PAGING_KEYS, batchSize); + return conf; + } + @Test public void testTimeToStatEmptyDirectory() throws Throwable { describe("Time to stat an empty directory"); @@ -175,7 +328,7 @@ private void timeToStatPath(Path path) throws IOException { MetricDiff metadataRequests = new MetricDiff(fs, Statistic.OBJECT_METADATA_REQUESTS); MetricDiff listRequests = - new MetricDiff(fs, Statistic.OBJECT_LIST_REQUESTS); + new MetricDiff(fs, Statistic.OBJECT_LIST_REQUEST); long attempts = getOperationCount(); NanoTimer timer = new NanoTimer(); for (long l = 0; l < attempts; l++) { @@ -188,5 +341,4 @@ private void timeToStatPath(Path path) throws IOException { LOG.info("listObjects: {}", listRequests); LOG.info("listObjects: per operation {}", listRequests.diff() / attempts); } - } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java index 2e5185bf55d9d..a8635ea3cd792 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java @@ -23,6 +23,9 @@ import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM; +import static org.apache.hadoop.fs.s3a.Constants.SERVER_SIDE_ENCRYPTION_KEY; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfEncryptionTestsDisabled; /** @@ -45,6 +48,8 @@ public void setup() throws 
Exception { @Override protected Configuration createScaleConfiguration() { Configuration conf = super.createScaleConfiguration(); + removeBaseAndBucketOverrides(conf, SERVER_SIDE_ENCRYPTION_KEY, + SERVER_SIDE_ENCRYPTION_ALGORITHM); S3ATestUtils.disableFilesystemCaching(conf); conf.set(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM, getSSEAlgorithm().getMethod()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java index efd96c4e7387e..4d1096fa79053 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java @@ -27,13 +27,20 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AInputPolicy; import org.apache.hadoop.fs.s3a.S3AInputStream; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.fs.statistics.MeanStatistic; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.util.LineReader; + +import org.assertj.core.api.Assertions; import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -46,6 +53,17 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.*; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticMinimum; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupMaximumStatistic; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupMeanStatistic; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; +import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_HTTP_GET_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MAX; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MEAN; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MIN; /** * Look at the performance of S3a operations. @@ -58,10 +76,13 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase { private Path testData; private FileStatus testDataStatus; private FSDataInputStream in; - private S3AInstrumentation.InputStreamStatistics streamStatistics; + private S3AInputStreamStatistics streamStatistics; public static final int BLOCK_SIZE = 32 * 1024; public static final int BIG_BLOCK_SIZE = 256 * 1024; + private static final IOStatisticsSnapshot IOSTATS = snapshotIOStatistics(); + + /** Tests only run if the there is a named test file that can be read. 
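The random-IO case further down also verifies the duration statistics of the HTTP GET requests; those lookups build the statistic key from the operation name plus a suffix constant. A rough sketch of the pattern, assuming an open stream "in" as used elsewhere in this class (the exact suffix values are an assumption):

    // duration statistics are looked up as <statistic-name> + suffix (min/max/mean)
    IOStatistics iostats = in.getIOStatistics();
    long slowestGet = lookupMaximumStatistic(iostats,
        ACTION_HTTP_GET_REQUEST + SUFFIX_MAX);
    MeanStatistic meanGet = lookupMeanStatistic(iostats,
        ACTION_HTTP_GET_REQUEST + SUFFIX_MEAN);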
*/ private boolean testDataAvailable = true; private String assumptionMessage = "test file"; @@ -106,7 +127,22 @@ private void bindS3aFS(Path path) throws IOException { public void cleanup() { describe("cleanup"); IOUtils.closeStream(in); - IOUtils.closeStream(s3aFS); + if (in != null) { + LOG.info("Stream statistics {}", + ioStatisticsSourceToString(in)); + IOSTATS.aggregate(in.getIOStatistics()); + } + if (s3aFS != null) { + LOG.info("FileSystem statistics {}", + ioStatisticsSourceToString(s3aFS)); + FILESYSTEM_IOSTATS.aggregate(s3aFS.getIOStatistics()); + IOUtils.closeStream(s3aFS); + } + } + + @AfterClass + public static void dumpIOStatistics() { + LOG.info("Aggregate Stream Statistics {}", IOSTATS); } /** @@ -187,7 +223,7 @@ protected void assertStreamOpenedExactlyOnce() { */ private void assertOpenOperationCount(long expected) { assertEquals("open operations in\n" + in, - expected, streamStatistics.openOperations); + expected, streamStatistics.getOpenOperations()); } /** @@ -295,7 +331,7 @@ public void testLazySeekEnabled() throws Throwable { logTimePerIOP("seek()", timer, blockCount); logStreamStatistics(); assertOpenOperationCount(0); - assertEquals("bytes read", 0, streamStatistics.bytesRead); + assertEquals("bytes read", 0, streamStatistics.getBytesRead()); } @Test @@ -320,7 +356,7 @@ public void testReadWithNormalPolicy() throws Throwable { @Test public void testDecompressionSequential128K() throws Throwable { describe("Decompress with a 128K readahead"); - executeDecompression(128 * 1024, S3AInputPolicy.Sequential); + executeDecompression(128 * _1KB, S3AInputPolicy.Sequential); assertStreamOpenedExactlyOnce(); } @@ -339,9 +375,11 @@ private void executeDecompression(long readahead, int lines = 0; FSDataInputStream objectIn = openTestFile(inputPolicy, readahead); + IOStatistics readerStatistics = null; ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); try (LineReader lineReader = new LineReader( codec.createInputStream(objectIn), getConf())) { + readerStatistics = lineReader.getIOStatistics(); Text line = new Text(); int read; while ((read = lineReader.readLine(line)) > 0) { @@ -359,6 +397,9 @@ private void executeDecompression(long readahead, readahead); logTimePerIOP("line read", timer, lines); logStreamStatistics(); + assertNotNull("No IOStatistics through line reader", readerStatistics); + LOG.info("statistics from reader {}", + ioStatisticsToString(readerStatistics)); } private void logStreamStatistics() { @@ -391,8 +432,8 @@ protected void executeSeekReadSequence(long blockSize, readahead); logTimePerIOP("seek(pos + " + blockCount+"); read()", timer, blockCount); LOG.info("Effective bandwidth {} MB/S", - timer.bandwidthDescription(streamStatistics.bytesRead - - streamStatistics.bytesSkippedOnSeek)); + timer.bandwidthDescription(streamStatistics.getBytesRead() - + streamStatistics.getBytesSkippedOnSeek())); logStreamStatistics(); } @@ -419,7 +460,7 @@ protected void executeSeekReadSequence(long blockSize, public void testRandomIORandomPolicy() throws Throwable { executeRandomIO(S3AInputPolicy.Random, (long) RANDOM_IO_SEQUENCE.length); assertEquals("streams aborted in " + streamStatistics, - 0, streamStatistics.aborted); + 0, streamStatistics.getAborted()); } @Test @@ -427,11 +468,21 @@ public void testRandomIONormalPolicy() throws Throwable { long expectedOpenCount = RANDOM_IO_SEQUENCE.length; executeRandomIO(S3AInputPolicy.Normal, expectedOpenCount); assertEquals("streams aborted in " + streamStatistics, - 1, streamStatistics.aborted); + 1, 
streamStatistics.getAborted()); assertEquals("policy changes in " + streamStatistics, - 2, streamStatistics.policySetCount); + 2, streamStatistics.getPolicySetCount()); assertEquals("input policy in " + streamStatistics, - S3AInputPolicy.Random.ordinal(), streamStatistics.inputPolicy); + S3AInputPolicy.Random.ordinal(), + streamStatistics.getInputPolicy()); + IOStatistics ioStatistics = streamStatistics.getIOStatistics(); + verifyStatisticCounterValue( + ioStatistics, + StreamStatisticNames.STREAM_READ_ABORTED, + 1); + verifyStatisticCounterValue( + ioStatistics, + StreamStatisticNames.STREAM_READ_SEEK_POLICY_CHANGED, + 2); } /** @@ -466,9 +517,22 @@ private ContractTestUtils.NanoTimer executeRandomIO(S3AInputPolicy policy, assertOpenOperationCount(expectedOpenCount); logTimePerIOP("byte read", timer, totalBytesRead); LOG.info("Effective bandwidth {} MB/S", - timer.bandwidthDescription(streamStatistics.bytesRead - - streamStatistics.bytesSkippedOnSeek)); + timer.bandwidthDescription(streamStatistics.getBytesRead() - + streamStatistics.getBytesSkippedOnSeek())); logStreamStatistics(); + IOStatistics iostats = in.getIOStatistics(); + long maxHttpGet = lookupMaximumStatistic(iostats, + ACTION_HTTP_GET_REQUEST + SUFFIX_MAX); + assertThatStatisticMinimum(iostats, + ACTION_HTTP_GET_REQUEST + SUFFIX_MIN) + .isGreaterThan(0) + .isLessThan(maxHttpGet); + MeanStatistic getMeanStat = lookupMeanStatistic(iostats, + ACTION_HTTP_GET_REQUEST + SUFFIX_MEAN); + Assertions.assertThat(getMeanStat.getSamples()) + .describedAs("sample count of %s", getMeanStat) + .isEqualTo(expectedOpenCount); + return timer; } @@ -525,7 +589,7 @@ public void testRandomReadOverBuffer() throws Throwable { + " current position in stream " + currentPos + " in\n" + fs + "\n " + in, - 1, streamStatistics.openOperations); + 1, streamStatistics.getOpenOperations()); for (int i = currentPos; i < currentPos + read; i++) { assertEquals("Wrong value from byte " + i, sourceData[i], buffer[i]); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java new file mode 100644 index 0000000000000..231cfd884e0c8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.scale; + +import java.io.File; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3AInstrumentation; +import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.auth.ProgressCounter; +import org.apache.hadoop.fs.s3a.commit.CommitOperations; + +import static org.apache.hadoop.fs.StreamCapabilities.ABORTABLE_STREAM; +import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; +import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; +import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents; +import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile; +import static org.apache.hadoop.fs.s3a.Constants.MULTIPART_SIZE; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.Statistic.INVOCATION_ABORT; +import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_MULTIPART_UPLOAD_ABORTED; +import static org.apache.hadoop.fs.s3a.impl.InternalConstants.UPLOAD_PART_COUNT_LIMIT; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertCompleteAbort; +import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertNoopAbort; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.verifyStatisticCounterValue; +import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Testing S3 multipart upload for s3. + */ +public class ITestS3AMultipartUploadSizeLimits extends S3AScaleTestBase { + + public static final int MPU_SIZE = 5 * _1MB; + + @Override + protected Configuration createScaleConfiguration() { + Configuration configuration = super.createScaleConfiguration(); + removeBaseAndBucketOverrides(configuration, + MULTIPART_SIZE, + UPLOAD_PART_COUNT_LIMIT); + configuration.setLong(MULTIPART_SIZE, MPU_SIZE); + // Setting the part count limit to 2 such that we + // failures. + configuration.setLong(UPLOAD_PART_COUNT_LIMIT, 2); + return configuration; + } + + /** + * Uploads under the limit are valid. + */ + @Test + public void testTwoPartUpload() throws Throwable { + Path file = path(getMethodName()); + // Creating a file having parts less than configured + // part count will succeed. + createFile(getFileSystem(), file, true, + dataset(6 * _1MB, 'a', 'z' - 'a')); + } + + /** + * Tests to validate that exception is thrown during a + * multi part upload when the number of parts is greater + * than the allowed limit. 
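+ * <p>
+ * A minimal sketch of the arithmetic behind this test, assuming the
+ * values set in {@code createScaleConfiguration()} above:
+ * <pre>{@code
+ *   long partSize = MPU_SIZE;          // 5 * _1MB
+ *   long partLimit = 2;                // UPLOAD_PART_COUNT_LIMIT
+ *   long dataSize = 15 * _1MB;         // size written by the test
+ *   long partsNeeded = (dataSize + partSize - 1) / partSize;   // == 3
+ *   // three parts exceed the two-part limit, so the write must fail
+ * }</pre>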
+ */ + @Test + public void testUploadOverLimitFailure() throws Throwable { + S3AFileSystem fs = getFileSystem(); + Path file = path(getMethodName()); + // Creating a file with more than configured part count should + // throw a PathIOE + intercept(PathIOException.class, + () -> createFile(fs, + file, + false, + dataset(15 * _1MB, 'a', 'z' - 'a'))); + // and the path does not exist + assertPathDoesNotExist("upload must not have completed", file); + } + + @Test + public void testCommitLimitFailure() throws Throwable { + describe("verify commit uploads fail-safe when MPU limits exceeded"); + S3AFileSystem fs = getFileSystem(); + CommitOperations actions = new CommitOperations(fs); + File tempFile = File.createTempFile("commit", ".txt"); + FileUtils.writeByteArrayToFile(tempFile, + dataset(15 * _1MB, 'a', 'z' - 'a')); + Path dest = methodPath(); + final S3AInstrumentation instrumentation = fs.getInstrumentation(); + final long initial = instrumentation.getCounterValue( + Statistic.COMMITTER_COMMITS_ABORTED); + + intercept(PathIOException.class, () -> + actions.uploadFileToPendingCommit(tempFile, + dest, + null, + MPU_SIZE, + new ProgressCounter())); + assertPathDoesNotExist("upload must not have completed", dest); + final long after = instrumentation.getCounterValue( + Statistic.COMMITTER_COMMITS_ABORTED); + Assertions.assertThat(after). + describedAs("commit abort count") + .isEqualTo(initial + 1); + } + + @Test + public void testAbortAfterTwoPartUpload() throws Throwable { + Path file = path(getMethodName()); + + byte[] data = dataset(6 * _1MB, 'a', 'z' - 'a'); + + S3AFileSystem fs = getFileSystem(); + FSDataOutputStream stream = fs.create(file, true); + try { + stream.write(data); + + // From testTwoPartUpload() we know closing stream will finalize uploads + // and materialize the path. Here we call abort() to abort the upload, + // and ensure the path is NOT available. (uploads are aborted) + + assertCompleteAbort(stream.abort()); + + // the path should not exist + assertPathDoesNotExist("upload must not have completed", file); + } finally { + IOUtils.closeStream(stream); + // check the path doesn't exist "after" closing stream + assertPathDoesNotExist("upload must not have completed", file); + } + verifyStreamWasAborted(fs, stream); + // a second abort is a no-op + assertNoopAbort(stream.abort()); + } + + + @Test + public void testAbortWhenOverwritingAFile() throws Throwable { + Path file = path(getMethodName()); + + S3AFileSystem fs = getFileSystem(); + // write the original data + byte[] smallData = writeTextFile(fs, file, "original", true); + + // now attempt a multipart upload + byte[] data = dataset(6 * _1MB, 'a', 'z' - 'a'); + FSDataOutputStream stream = fs.create(file, true); + try { + ContractTestUtils.assertCapabilities(stream, + new String[]{ABORTABLE_STREAM}, + null); + stream.write(data); + assertCompleteAbort(stream.abort()); + + verifyFileContents(fs, file, smallData); + } finally { + IOUtils.closeStream(stream); + } + verifyFileContents(fs, file, smallData); + verifyStreamWasAborted(fs, stream); + } + + /** + * Check up on the IOStatistics of the FS and stream to verify that + * a stream was aborted -both in invocations of abort() and + * that the multipart upload itself was aborted. 
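+ * <p>
+ * A sketch of the calling pattern being verified, using only the
+ * helpers introduced in this patch (abbreviated, not a full test):
+ * <pre>{@code
+ *   FSDataOutputStream out = fs.create(path, true);
+ *   out.write(data);
+ *   // abort() discards the pending multipart upload instead of
+ *   // completing it on close()
+ *   assertCompleteAbort(out.abort());
+ *   verifyStreamWasAborted(fs, out);
+ *   // a second abort is expected to be a no-op
+ *   assertNoopAbort(out.abort());
+ * }</pre>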
+ * @param fs filesystem + * @param stream stream + */ + private void verifyStreamWasAborted(final S3AFileSystem fs, + final FSDataOutputStream stream) { + // check the stream + final IOStatistics iostats = stream.getIOStatistics(); + final String sstr = ioStatisticsToPrettyString(iostats); + LOG.info("IOStatistics for stream: {}", sstr); + verifyStatisticCounterValue(iostats, INVOCATION_ABORT.getSymbol(), 1); + verifyStatisticCounterValue(iostats, + OBJECT_MULTIPART_UPLOAD_ABORTED.getSymbol(), 1); + + // now the FS. + final IOStatistics fsIostats = fs.getIOStatistics(); + assertThatStatisticCounter(fsIostats, INVOCATION_ABORT.getSymbol()) + .isGreaterThanOrEqualTo(1); + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java index eb80bc579f6c1..d95b46b10d786 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java @@ -23,10 +23,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; import org.apache.hadoop.fs.s3a.S3AInputStream; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.Statistic; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,6 +33,7 @@ import java.io.InputStream; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.lookupGaugeStatistic; /** * Base class for scale tests; here is where the common scale configuration @@ -105,9 +105,7 @@ public void setup() throws Exception { * @return the configuration. */ private synchronized Configuration demandCreateConfiguration() { - if (conf == null) { - conf = createScaleConfiguration(); - } + conf = createScaleConfiguration(); return conf; } @@ -162,7 +160,7 @@ protected int getTestTimeoutMillis() { * @param in wrapper * @return the statistics for the inner stream */ - protected S3AInstrumentation.InputStreamStatistics getInputStreamStatistics( + protected S3AInputStreamStatistics getInputStreamStatistics( FSDataInputStream in) { return getS3AInputStream(in).getS3AStreamStatistics(); } @@ -185,17 +183,15 @@ protected S3AInputStream getS3AInputStream( } /** - * Get the gauge value of a statistic. Raises an assertion if + * Get the gauge value of a statistic from the + * IOStatistics of the filesystem. Raises an assertion if * there is no such gauge. * @param statistic statistic to look up * @return the value. 
*/ public long gaugeValue(Statistic statistic) { - S3AInstrumentation instrumentation = getFileSystem().getInstrumentation(); - MutableGaugeLong gauge = instrumentation.lookupGauge(statistic.getSymbol()); - assertNotNull("No gauge " + statistic - + " in " + instrumentation.dump("", " = ", "\n", true), gauge); - return gauge.value(); + return lookupGaugeStatistic(getFileSystem().getIOStatistics(), + statistic.getSymbol()); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/CsvFile.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/CsvFile.java index 06e6d2a78aef7..8e3da890d7e69 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/CsvFile.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/CsvFile.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.io.PrintWriter; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java index 64974db5a466c..6918941295699 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java @@ -47,9 +47,9 @@ import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AInputStream; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; @@ -302,8 +302,8 @@ public void testSelectSeek() throws Throwable { SELECT_EVERYTHING)) { SelectInputStream sis = (SelectInputStream) seekStream.getWrappedStream(); - S3AInstrumentation.InputStreamStatistics streamStats - = sis.getS3AStreamStatistics(); + S3AInputStreamStatistics streamStats = + sis.getS3AStreamStatistics(); // lazy seek doesn't raise a problem here seekStream.seek(0); assertEquals("first byte read", fullData[0], seekStream.read()); @@ -344,7 +344,7 @@ public void testSelectSeek() throws Throwable { assertEquals("byte at seek position", fullData[(int)seekStream.getPos()], seekStream.read()); assertEquals("Seek bytes skipped in " + streamStats, - seekRange, streamStats.bytesSkippedOnSeek); + seekRange, streamStats.getBytesSkippedOnSeek()); // try an invalid readahead range intercept(IllegalArgumentException.class, @@ -395,8 +395,9 @@ public void testSelectOddLinesNoHeader() throws Throwable { "SELECT * FROM S3OBJECT s WHERE s._5 = `TRUE`"); // and do a quick check on the instrumentation long bytesRead = getFileSystem().getInstrumentation() - .getCounterValue(Statistic.STREAM_SEEK_BYTES_READ); - assertNotEquals("No bytes read count", 0, bytesRead); + .getCounterValue(Statistic.STREAM_READ_BYTES); + assertNotEquals("No bytes read count in filesystem instrumentation counter", + 0, bytesRead); } @Test @@ -588,13 +589,14 @@ public void testCloseWithAbort() throws Throwable { stream.setReadahead(1L); assertEquals("Readahead on " + sis, 1, sis.getReadahead()); stream.read(); - 
S3AInstrumentation.InputStreamStatistics stats - = sis.getS3AStreamStatistics(); + S3AInputStreamStatistics stats + = (S3AInputStreamStatistics) + sis.getS3AStreamStatistics(); assertEquals("Read count in " + sis, - 1, stats.bytesRead); + 1, stats.getBytesRead()); stream.close(); assertEquals("Abort count in " + sis, - 1, stats.aborted); + 1, stats.getAborted()); readOps.assertDiffEquals("Read operations are still considered active", 0); intercept(PathIOException.class, FSExceptionMessages.STREAM_IS_CLOSED, @@ -608,12 +610,14 @@ public void testCloseWithNoAbort() throws Throwable { "SELECT * FROM S3OBJECT s"); stream.setReadahead(0x1000L); SelectInputStream sis = (SelectInputStream) stream.getWrappedStream(); - S3AInstrumentation.InputStreamStatistics stats - = sis.getS3AStreamStatistics(); + S3AInputStreamStatistics stats + = (S3AInputStreamStatistics) + sis.getS3AStreamStatistics(); stream.close(); - assertEquals("Close count in " + sis, 1, stats.closed); - assertEquals("Abort count in " + sis, 0, stats.aborted); - assertTrue("No bytes read in close of " + sis, stats.bytesReadInClose > 0); + assertEquals("Close count in " + sis, 1, stats.getClosed()); + assertEquals("Abort count in " + sis, 0, stats.getAborted()); + assertTrue("No bytes read in close of " + sis, + stats.getBytesReadInClose() > 0); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectCLI.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectCLI.java index e31b48e5b5862..a29abfdf639fc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectCLI.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectCLI.java @@ -42,7 +42,7 @@ import org.apache.hadoop.util.OperationDuration; import org.apache.hadoop.util.ToolRunner; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.exec; import static org.apache.hadoop.fs.s3a.select.ITestS3SelectLandsat.SELECT_NOTHING; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectLandsat.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectLandsat.java index 2099edd248b63..8b0578df11c01 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectLandsat.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectLandsat.java @@ -35,8 +35,8 @@ import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy.Source; +import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.apache.hadoop.fs.s3a.S3AInstrumentation; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.mapred.JobConf; @@ -381,7 +381,7 @@ public void testSelectSeekFullLandsat() throws Throwable { SELECT_EVERYTHING)) { SelectInputStream sis = (SelectInputStream) seekStream.getWrappedStream(); - S3AInstrumentation.InputStreamStatistics streamStats + S3AInputStreamStatistics streamStats = sis.getS3AStreamStatistics(); // lazy seek doesn't raise a problem 
here seekStream.seek(0); @@ -410,7 +410,7 @@ public void testSelectSeekFullLandsat() throws Throwable { assertEquals("byte at seek position", dataset[(int) seekStream.getPos()], seekStream.read()); assertEquals("Seek bytes skipped in " + streamStats, - seekRange, streamStats.bytesSkippedOnSeek); + seekRange, streamStats.getBytesSkippedOnSeek()); long offset; long increment = 64 * _1KB; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectMRJob.java index 181d797767397..52a591384772a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3SelectMRJob.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.s3a.select; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy; @@ -30,7 +31,6 @@ import org.apache.hadoop.examples.WordCount; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.impl.FutureIOSupport; -import org.apache.hadoop.fs.impl.WrappedIOException; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.S3AUtils; @@ -209,7 +209,7 @@ private String readStringFromFile(Path path) throws IOException { IOUtils.readFully(in, buffer, 0, bytesLen); return new String(buffer); } catch (IOException ex) { - throw new WrappedIOException(ex); + throw new UncheckedIOException(ex); } })); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java new file mode 100644 index 0000000000000..e7696996dbd1a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics; + +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.statistics.IOStatistics; + +import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_ENDPOINT; +import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getLandsatCSVPath; +import static org.apache.hadoop.fs.s3a.Statistic.STORE_IO_REQUEST; +import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter; + +/** + * Verify that AWS SDK statistics are wired up. + * This test tries to read data from US-east-1 and us-west-2 buckets + * so as to be confident that the nuances of region mapping + * are handed correctly (HADOOP-13551). + * The statistics are probed to verify that the wiring up is complete. + */ +public class ITestAWSStatisticCollection extends AbstractS3ATestBase { + + private static final Path COMMON_CRAWL_PATH + = new Path("s3a://osm-pds/planet/planet-latest.orc"); + + @Test + public void testLandsatStatistics() throws Throwable { + final Configuration conf = getConfiguration(); + // skips the tests if the landsat path isn't the default. + Path path = getLandsatCSVPath(conf); + conf.set(ENDPOINT, DEFAULT_ENDPOINT); + conf.unset("fs.s3a.bucket.landsat-pds.endpoint"); + + try (S3AFileSystem fs = (S3AFileSystem) path.getFileSystem(conf)) { + fs.getObjectMetadata(path); + IOStatistics iostats = fs.getIOStatistics(); + assertThatStatisticCounter(iostats, + STORE_IO_REQUEST.getSymbol()) + .isGreaterThanOrEqualTo(1); + } + } + + @Test + public void testCommonCrawlStatistics() throws Throwable { + final Configuration conf = getConfiguration(); + // skips the tests if the landsat path isn't the default. + getLandsatCSVPath(conf); + + Path path = COMMON_CRAWL_PATH; + conf.set(ENDPOINT, DEFAULT_ENDPOINT); + + try (S3AFileSystem fs = (S3AFileSystem) path.getFileSystem(conf)) { + fs.getObjectMetadata(path); + IOStatistics iostats = fs.getIOStatistics(); + assertThatStatisticCounter(iostats, + STORE_IO_REQUEST.getSymbol()) + .isGreaterThanOrEqualTo(1); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java new file mode 100644 index 0000000000000..c85651d8ab6c9 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.statistics; + +import java.io.File; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; + +import org.junit.Test; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot; +import org.apache.hadoop.util.JsonSerialization; + +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; +import static java.time.temporal.ChronoField.HOUR_OF_DAY; +import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; +import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; + +/** + * Saves, loads and logs the aggregate IOStatistics as collected in this + * process. + */ +public class ITestAggregateIOStatistics extends AbstractS3ATestBase { + + @Test + public void testSaveStatisticsLocal() throws Throwable { + IOStatisticsSnapshot iostats = FILESYSTEM_IOSTATS; + iostats.aggregate(getFileSystem().getIOStatistics()); + JsonSerialization serializer + = IOStatisticsSnapshot.serializer(); + File outputDir = createOutputDir(); + File file = new File(outputDir, outputFilename()); + serializer.save(file, iostats); + IOStatisticsSnapshot loaded = serializer.load(file); + String s = serializer.toString(loaded); + LOG.info("Deserialized statistics in {}\n{}", + file, s); + } + + @Test + public void testSaveStatisticsS3() throws Throwable { + IOStatisticsSnapshot iostats = FILESYSTEM_IOSTATS; + JsonSerialization serializer + = IOStatisticsSnapshot.serializer(); + Path path = methodPath(); + serializer.save(getFileSystem(), path, iostats, true); + IOStatisticsSnapshot loaded = serializer.load(getFileSystem(), path); + String s = serializer.toString(loaded); + LOG.info("Deserialized statistics in {}\n{}", + path, s); + } + + protected File createOutputDir() { + String target = System.getProperty("test.build.dir", "target"); + File buildDir = new File(target, + this.getClass().getSimpleName()).getAbsoluteFile(); + buildDir.mkdirs(); + return buildDir; + } + + protected String outputFilename() { + LocalDateTime now = LocalDateTime.now(); + DateTimeFormatter formatter = new DateTimeFormatterBuilder() + .parseCaseInsensitive() + .append(ISO_LOCAL_DATE) + .appendLiteral('-') + .appendValue(HOUR_OF_DAY, 2) + .appendLiteral('.') + .appendValue(MINUTE_OF_HOUR, 2) + .optionalStart() + .appendLiteral('.') + .appendValue(SECOND_OF_MINUTE, 2) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 0, 9, true) + .toFormatter(); + return String.format("iostats-%s.json", + now.format(formatter)); + + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java new file mode 100644 index 0000000000000..8bed174fd31e1 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.statistics; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractContractStreamIOStatisticsTest; +import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.fs.contract.s3a.S3AContract; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; + +import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeEnableS3Guard; +import static org.apache.hadoop.fs.statistics.StreamStatisticNames.*; + +/** + * Test the S3A Streams IOStatistics support. + */ +public class ITestS3AContractStreamIOStatistics extends + AbstractContractStreamIOStatisticsTest { + + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + // patch in S3Guard options + maybeEnableS3Guard(conf); + return conf; + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return new S3AContract(conf); + } + + /** + * Keys which the input stream must support. + * @return a list of keys + */ + public List inputStreamStatisticKeys() { + return Arrays.asList( + StreamStatisticNames.STREAM_READ_ABORTED, + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_ABORT, + StreamStatisticNames.STREAM_READ_CLOSED, + StreamStatisticNames.STREAM_READ_BYTES_DISCARDED_CLOSE, + StreamStatisticNames.STREAM_READ_CLOSE_OPERATIONS, + StreamStatisticNames.STREAM_READ_OPENED, + StreamStatisticNames.STREAM_READ_BYTES, + StreamStatisticNames.STREAM_READ_EXCEPTIONS, + StreamStatisticNames.STREAM_READ_FULLY_OPERATIONS, + StreamStatisticNames.STREAM_READ_OPERATIONS, + StreamStatisticNames.STREAM_READ_OPERATIONS_INCOMPLETE, + StreamStatisticNames.STREAM_READ_VERSION_MISMATCHES, + StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_DISCARDED, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED + ); + } + + /** + * Keys which the output stream must support. 
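+ * <p>
+ * A sketch of how one of these keys is typically exercised, assuming
+ * the IOStatistics assertions used elsewhere in this patch and that a
+ * single-byte write increments the counter by one:
+ * <pre>{@code
+ *   FSDataOutputStream out = fs.create(path, true);
+ *   out.write('a');
+ *   out.close();
+ *   // STREAM_WRITE_BYTES is one of the keys returned below
+ *   verifyStatisticCounterValue(out.getIOStatistics(),
+ *       STREAM_WRITE_BYTES, 1);
+ * }</pre>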
+ * @return a list of keys + */ + @Override + public List outputStreamStatisticKeys() { + return Arrays.asList(STREAM_WRITE_BYTES, + STREAM_WRITE_BLOCK_UPLOADS, + STREAM_WRITE_EXCEPTIONS); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java index 28b34328ed11c..77c7736575c39 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/ExtraAssertions.java @@ -23,11 +23,13 @@ import java.util.List; import java.util.stream.Collectors; +import org.assertj.core.api.Assertions; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.Abortable; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; @@ -149,4 +151,31 @@ protected void assertStatusCode(AWSServiceIOException e, int code) throw e; } } + + + /** + * Assert that an abort was completely successful in that it + * was not a no-op and no exception was raised during + * cleanup. + * @param result result to assert over + */ + public static void assertCompleteAbort( + Abortable.AbortableResult result) { + Assertions.assertThat(result) + .describedAs("Abort operation result %s", result) + .matches(r -> !r.alreadyClosed()) + .matches(r -> r.anyCleanupException() == null); + } + + /** + * Assert that an abort was a no-op as the + * stream had already closed/aborted. + * @param result result to assert over + */ + public static void assertNoopAbort( + Abortable.AbortableResult result) { + Assertions.assertThat(result) + .describedAs("Abort operation result %s", result) + .matches(r -> r.alreadyClosed()); + } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java new file mode 100644 index 0000000000000..f683a0a008caf --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalListingOperationCallbacks.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.test; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; +import org.apache.hadoop.fs.s3a.S3ListRequest; +import org.apache.hadoop.fs.s3a.S3ListResult; +import org.apache.hadoop.fs.s3a.impl.ListingOperationCallbacks; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.statistics.DurationTrackerFactory; + +/** + * Stub implementation of {@link ListingOperationCallbacks}. + */ +public class MinimalListingOperationCallbacks + implements ListingOperationCallbacks { + + @Override + public CompletableFuture listObjectsAsync( + final S3ListRequest request, + final DurationTrackerFactory trackerFactory) throws IOException { + return null; + } + + @Override + public CompletableFuture continueListObjectsAsync( + final S3ListRequest request, + final S3ListResult prevResult, + final DurationTrackerFactory trackerFactory) throws IOException { + return null; + } + + @Override + public S3ALocatedFileStatus toLocatedFileStatus( + S3AFileStatus status) throws IOException { + return null; + } + + @Override + public S3ListRequest createListObjectsRequest( + String key, + String delimiter) { + return null; + } + + @Override + public long getDefaultBlockSize(Path path) { + return 0; + } + + @Override + public int getMaxKeys() { + return 0; + } + + @Override + public ITtlTimeProvider getUpdatedTtlTimeProvider() { + return null; + } + + @Override + public boolean allowAuthoritative(Path p) { + return false; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java new file mode 100644 index 0000000000000..a50b944c798ca --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.test; + +import java.io.IOException; +import java.util.List; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.DeleteObjectsResult; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; +import com.amazonaws.services.s3.transfer.model.CopyResult; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus; +import org.apache.hadoop.fs.s3a.S3AReadOpContext; +import org.apache.hadoop.fs.s3a.S3ObjectAttributes; +import org.apache.hadoop.fs.s3a.impl.OperationCallbacks; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; + +/** + * Stub implementation of {@link OperationCallbacks}. + */ +public class MinimalOperationCallbacks + implements OperationCallbacks { + + @Override + public S3ObjectAttributes createObjectAttributes( + Path path, + String eTag, + String versionId, + long len) { + return null; + } + + @Override + public S3ObjectAttributes createObjectAttributes( + S3AFileStatus fileStatus) { + return null; + } + + @Override + public S3AReadOpContext createReadContext( + FileStatus fileStatus) { + return null; + } + + @Override + public void finishRename( + Path sourceRenamed, + Path destCreated) + throws IOException { + + } + + @Override + public void deleteObjectAtPath( + Path path, + String key, + boolean isFile, + BulkOperationState operationState) + throws IOException { + + } + + @Override + public RemoteIterator listFilesAndDirectoryMarkers( + final Path path, + final S3AFileStatus status, + final boolean collectTombstones, + final boolean includeSelf) throws IOException { + return null; + } + + @Override + public CopyResult copyFile( + String srcKey, + String destKey, + S3ObjectAttributes srcAttributes, + S3AReadOpContext readContext) + throws IOException { + return null; + } + + @Override + public DeleteObjectsResult removeKeys( + List keysToDelete, + boolean deleteFakeDir, + List undeletedObjectsOnFailure, + BulkOperationState operationState, + boolean quiet) + throws MultiObjectDeleteException, AmazonClientException, + IOException { + return null; + } + + @Override + public boolean allowAuthoritative(Path p) { + return false; + } + + @Override + public RemoteIterator listObjects( + Path path, + String key) + throws IOException { + return null; + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/OperationTrackingStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/OperationTrackingStore.java new file mode 100644 index 0000000000000..1bf0c3e6fc1e9 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/OperationTrackingStore.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.test; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.impl.StoreContext; +import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState; +import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata; +import org.apache.hadoop.fs.s3a.s3guard.ITtlTimeProvider; +import org.apache.hadoop.fs.s3a.s3guard.MetadataStore; +import org.apache.hadoop.fs.s3a.s3guard.PathMetadata; +import org.apache.hadoop.fs.s3a.s3guard.RenameTracker; + +/** + * MetadataStore which tracks what is deleted and added. + */ +public class OperationTrackingStore implements MetadataStore { + + private final List deleted = new ArrayList<>(); + + private final List created = new ArrayList<>(); + + @Override + public void initialize(final FileSystem fs, + ITtlTimeProvider ttlTimeProvider) { + } + + @Override + public void initialize(final Configuration conf, + ITtlTimeProvider ttlTimeProvider) { + } + + @Override + public void forgetMetadata(final Path path) { + } + + @Override + public PathMetadata get(final Path path) { + return null; + } + + @Override + public PathMetadata get(final Path path, + final boolean wantEmptyDirectoryFlag) { + return null; + } + + @Override + public DirListingMetadata listChildren(final Path path) { + return null; + } + + @Override + public void put(final PathMetadata meta) { + put(meta, null); + } + + @Override + public void put(final PathMetadata meta, + final BulkOperationState operationState) { + created.add(meta.getFileStatus().getPath()); + } + + @Override + public void put(final Collection metas, + final BulkOperationState operationState) { + metas.stream().forEach(meta -> put(meta, null)); + } + + @Override + public void put(final DirListingMetadata meta, + final List unchangedEntries, + final BulkOperationState operationState) { + created.add(meta.getPath()); + } + + @Override + public void destroy() { + } + + @Override + public void delete(final Path path, + final BulkOperationState operationState) { + deleted.add(path); + } + + @Override + public void deletePaths(final Collection paths, + @Nullable final BulkOperationState operationState) + throws IOException { + deleted.addAll(paths); + } + + @Override + public void deleteSubtree(final Path path, + final BulkOperationState operationState) { + + } + + @Override + public void move(@Nullable final Collection pathsToDelete, + @Nullable final Collection pathsToCreate, + @Nullable final BulkOperationState operationState) { + } + + @Override + public void prune(final PruneMode pruneMode, final long cutoff) { + } + + @Override + public long prune(final PruneMode pruneMode, + final long cutoff, + final String keyPrefix) { + return 0; + } + + @Override + public BulkOperationState initiateBulkWrite( + final BulkOperationState.OperationType operation, + final Path dest) { + 
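+    // stub implementation: bulk writes are not tracked here,
+    // so no operation state object is returned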
return null; + } + + @Override + public void setTtlTimeProvider(ITtlTimeProvider ttlTimeProvider) { + } + + @Override + public Map getDiagnostics() { + return null; + } + + @Override + public void updateParameters(final Map parameters) { + } + + @Override + public void close() { + } + + public List getDeleted() { + return deleted; + } + + public List getCreated() { + return created; + } + + @Override + public RenameTracker initiateRenameOperation( + final StoreContext storeContext, + final Path source, + final S3AFileStatus sourceStatus, + final Path dest) { + throw new UnsupportedOperationException("unsupported"); + } + + @Override + public void addAncestors(final Path qualifiedPath, + @Nullable final BulkOperationState operationState) { + + } +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java new file mode 100644 index 0000000000000..797d33c8d90d7 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java @@ -0,0 +1,337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.tools; + +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.net.URI; +import java.util.List; + +import org.assertj.core.api.Assertions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.StringUtils; + +import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBucketOverrides; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.VERBOSE; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommand; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommandToFailure; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.UNLIMITED_LISTING; + +/** + * Class for marker tool tests -sets up keeping/deleting filesystems, + * has methods to invoke. + */ +public class AbstractMarkerToolTest extends AbstractS3ATestBase { + + private static final Logger LOG = + LoggerFactory.getLogger(AbstractMarkerToolTest.class); + + /** the -verbose option. 
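+ * (for example, {@code m(VERBOSE)} is expected to yield {@code "-verbose"})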
*/ + protected static final String V = AbstractMarkerToolTest.m(VERBOSE); + + /** FS which keeps markers. */ + private S3AFileSystem keepingFS; + + /** FS which deletes markers. */ + private S3AFileSystem deletingFS; + + /** FS which mixes markers; only created in some tests. */ + private S3AFileSystem mixedFS; + @Override + protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + String bucketName = getTestBucketName(conf); + removeBaseAndBucketOverrides(bucketName, conf, + S3A_BUCKET_PROBE, + DIRECTORY_MARKER_POLICY, + S3_METADATA_STORE_IMPL, + METADATASTORE_AUTHORITATIVE, + AUTHORITATIVE_PATH); + // base FS is legacy + conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE); + conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); + + // turn off bucket probes for a bit of speedup in the connectors we create. + conf.setInt(S3A_BUCKET_PROBE, 0); + return conf; + } + + @Override + public void setup() throws Exception { + super.setup(); + setKeepingFS(createFS(DIRECTORY_MARKER_POLICY_KEEP, null)); + setDeletingFS(createFS(DIRECTORY_MARKER_POLICY_DELETE, null)); + } + + @Override + public void teardown() throws Exception { + // do this ourselves to avoid audits teardown failing + // when surplus markers are found + deleteTestDirInTeardown(); + super.teardown(); + IOUtils.cleanupWithLogger(LOG, getKeepingFS(), + getMixedFS(), getDeletingFS()); + + } + + /** + * FS which deletes markers. + */ + public S3AFileSystem getDeletingFS() { + return deletingFS; + } + + public void setDeletingFS(final S3AFileSystem deletingFS) { + this.deletingFS = deletingFS; + } + + /** + * FS which keeps markers. + */ + protected S3AFileSystem getKeepingFS() { + return keepingFS; + } + + private void setKeepingFS(S3AFileSystem keepingFS) { + this.keepingFS = keepingFS; + } + + /** only created on demand. */ + private S3AFileSystem getMixedFS() { + return mixedFS; + } + + protected void setMixedFS(S3AFileSystem mixedFS) { + this.mixedFS = mixedFS; + } + + /** + * Get a filename for a temp file. + * The generated file is deleted. + * + * @return a file path for a output file + */ + protected File tempAuditFile() throws IOException { + final File audit = File.createTempFile("audit", ".txt"); + audit.delete(); + return audit; + } + + /** + * Read the audit output and verify it has the expected number of lines. + * @param auditFile audit file to read + * @param expected expected line count + */ + protected void expectMarkersInOutput(final File auditFile, + final int expected) + throws IOException { + final List lines = readOutput(auditFile); + Assertions.assertThat(lines) + .describedAs("Content of %s", auditFile) + .hasSize(expected); + } + + /** + * Read the output file in. Logs the contents at info. + * @param outputFile audit output file. + * @return the lines + */ + protected List readOutput(final File outputFile) + throws IOException { + try (FileReader reader = new FileReader(outputFile)) { + final List lines = + org.apache.commons.io.IOUtils.readLines(reader); + + LOG.info("contents of output file {}\n{}", outputFile, + StringUtils.join("\n", lines)); + return lines; + } + } + + /** + * Create a new FS with given marker policy and path. + * This filesystem MUST be closed in test teardown. + * @param markerPolicy markers + * @param authPath authoritative path. If null: no path. + * @return a new FS. 
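+ * <p>
+ * A sketch of typical usage from a test case (mirroring
+ * {@code testAuthPathIsMixed()} in {@code ITestMarkerTool}):
+ * <pre>{@code
+ *   S3AFileSystem mixedFS = createFS(
+ *       DIRECTORY_MARKER_POLICY_AUTHORITATIVE,
+ *       new Path(methodPath(), "source/dir2").toUri().toString());
+ *   setMixedFS(mixedFS);    // ensures teardown closes it
+ * }</pre>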
+ */ + protected S3AFileSystem createFS(String markerPolicy, + String authPath) throws Exception { + S3AFileSystem testFS = getFileSystem(); + Configuration conf = new Configuration(testFS.getConf()); + URI testFSUri = testFS.getUri(); + String bucketName = getTestBucketName(conf); + removeBucketOverrides(bucketName, conf, + DIRECTORY_MARKER_POLICY, + S3_METADATA_STORE_IMPL, + BULK_DELETE_PAGE_SIZE, + AUTHORITATIVE_PATH); + if (authPath != null) { + conf.set(AUTHORITATIVE_PATH, authPath); + } + // Use a very small page size to force the paging + // code to be tested. + conf.setInt(BULK_DELETE_PAGE_SIZE, 2); + conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL); + conf.set(DIRECTORY_MARKER_POLICY, markerPolicy); + S3AFileSystem fs2 = new S3AFileSystem(); + fs2.initialize(testFSUri, conf); + LOG.info("created new filesystem with policy {} and auth path {}", + markerPolicy, + (authPath == null ? "(null)": authPath)); + return fs2; + } + + /** + * Execute the marker tool, expecting the execution to succeed. + * @param sourceFS filesystem to use + * @param path path to scan + * @param doPurge should markers be purged + * @param expectedMarkerCount number of markers expected + * @return the result + */ + protected MarkerTool.ScanResult markerTool( + final FileSystem sourceFS, + final Path path, + final boolean doPurge, + final int expectedMarkerCount) + throws IOException { + return markerTool(0, sourceFS, path, doPurge, + expectedMarkerCount, + UNLIMITED_LISTING, false); + } + + /** + * Run a S3GuardTool command from a varags list and the + * configuration returned by {@code getConfiguration()}. + * @param args argument list + * @return the return code + * @throws Exception any exception + */ + protected int run(Object... args) throws Exception { + return runS3GuardCommand(uncachedFSConfig(getConfiguration()), args); + } + + /** + * Take a configuration, copy it and disable FS Caching on + * the new one. + * @param conf source config + * @return a new, patched, config + */ + protected Configuration uncachedFSConfig(final Configuration conf) { + Configuration c = new Configuration(conf); + disableFilesystemCaching(c); + return c; + } + + /** + * given an FS instance, create a matching configuration where caching + * is disabled. + * @param fs source + * @return new config. + */ + protected Configuration uncachedFSConfig(final FileSystem fs) { + return uncachedFSConfig(fs.getConf()); + } + + /** + * Run a S3GuardTool command from a varags list, catch any raised + * ExitException and verify the status code matches that expected. + * @param status expected status code of the exception + * @param args argument list + * @throws Exception any exception + */ + protected void runToFailure(int status, Object... args) + throws Exception { + Configuration conf = uncachedFSConfig(getConfiguration()); + runS3GuardCommandToFailure(conf, status, args); + } + + /** + * Given a base and a filename, create a new path. + * @param base base path + * @param name name: may be empty, in which case the base path is returned + * @return a path + */ + protected static Path toPath(final Path base, final String name) { + return name.isEmpty() ? base : new Path(base, name); + } + + /** + * Execute the marker tool, expecting the execution to + * return a specific exit code. + * + * @param sourceFS filesystem to use + * @param exitCode exit code to expect. 
+ * @param path path to scan + * @param doPurge should markers be purged + * @param expectedMarkers number of markers expected + * @param limit limit of files to scan; -1 for 'unlimited' + * @param nonAuth only use nonauth path count for failure rules + * @return the result + */ + public static MarkerTool.ScanResult markerTool( + final int exitCode, + final FileSystem sourceFS, + final Path path, + final boolean doPurge, + final int expectedMarkers, + final int limit, + final boolean nonAuth) throws IOException { + + MarkerTool.ScanResult result = MarkerTool.execMarkerTool( + new MarkerTool.ScanArgsBuilder() + .withSourceFS(sourceFS) + .withPath(path) + .withDoPurge(doPurge) + .withMinMarkerCount(expectedMarkers) + .withMaxMarkerCount(expectedMarkers) + .withLimit(limit) + .withNonAuth(nonAuth) + .build()); + Assertions.assertThat(result.getExitCode()) + .describedAs("Exit code of marker(%s, %s, %d) -> %s", + path, doPurge, expectedMarkers, result) + .isEqualTo(exitCode); + return result; + } + + /** + * Add a "-" prefix to a string. + * @param s string to prefix + * @return a string for passing into the CLI + */ + protected static String m(String s) { + return "-" + s; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java new file mode 100644 index 0000000000000..fc1abc19dd8d8 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java @@ -0,0 +1,549 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.s3a.tools; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.S3AFileSystem; + +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE; +import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.BucketInfo.BUCKET_INFO; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommand; +import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommandToFailure; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.*; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_INTERRUPTED; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_ACCEPTABLE; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_FOUND; +import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_USAGE; + +/** + * Test the marker tool and use it to compare the behavior + * of keeping vs legacy S3A FS instances. + */ +public class ITestMarkerTool extends AbstractMarkerToolTest { + + protected static final Logger LOG = + LoggerFactory.getLogger(ITestMarkerTool.class); + + /** + * How many files to expect. + */ + private int expectedFileCount; + + /** + * How many markers to expect under dir1. + */ + private int expectedMarkersUnderDir1; + + /** + * How many markers to expect under dir2. + */ + private int expectedMarkersUnderDir2; + + /** + * How many markers to expect across both dirs? + */ + private int expectedMarkers; + + /** + * How many markers to expect including the base directory? 
+ */ + private int expectedMarkersWithBaseDir; + + + @Test + public void testCleanMarkersLegacyDir() throws Throwable { + describe("Clean markers under a deleting FS -expect none"); + CreatedPaths createdPaths = createPaths(getDeletingFS(), methodPath()); + markerTool(getDeletingFS(), createdPaths.base, false, 0); + markerTool(getDeletingFS(), createdPaths.base, true, 0); + } + + @Test + public void testCleanMarkersFileLimit() throws Throwable { + describe("Clean markers under a keeping FS -with file limit"); + CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath()); + + // audit will be interrupted + markerTool(EXIT_INTERRUPTED, getDeletingFS(), + createdPaths.base, false, 0, 1, false); + } + + @Test + public void testCleanMarkersKeepingDir() throws Throwable { + describe("Audit then clean markers under a deleting FS " + + "-expect markers to be found and then cleaned up"); + CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath()); + + // audit will find the expected entries + int expectedMarkerCount = createdPaths.dirs.size(); + S3AFileSystem fs = getDeletingFS(); + LOG.info("Auditing a directory with retained markers -expect failure"); + markerTool(EXIT_NOT_ACCEPTABLE, fs, + createdPaths.base, false, 0, UNLIMITED_LISTING, false); + + LOG.info("Auditing a directory expecting retained markers"); + markerTool(fs, createdPaths.base, false, + expectedMarkerCount); + + // we require that a purge didn't take place, so run the + // audit again. + LOG.info("Auditing a directory expecting retained markers"); + markerTool(fs, createdPaths.base, false, + expectedMarkerCount); + + LOG.info("Purging a directory of retained markers"); + // purge cleans up + assertMarkersDeleted(expectedMarkerCount, + markerTool(fs, createdPaths.base, true, expectedMarkerCount)); + // and a rerun doesn't find markers + LOG.info("Auditing a directory with retained markers -expect success"); + assertMarkersDeleted(0, + markerTool(fs, createdPaths.base, true, 0)); + } + + @Test + public void testRenameKeepingFS() throws Throwable { + describe("Rename with the keeping FS -verify that no markers" + + " exist at far end"); + Path base = methodPath(); + Path source = new Path(base, "source"); + Path dest = new Path(base, "dest"); + + S3AFileSystem fs = getKeepingFS(); + CreatedPaths createdPaths = createPaths(fs, source); + + // audit will find three entries + int expectedMarkerCount = createdPaths.dirs.size(); + + markerTool(fs, source, false, expectedMarkerCount); + fs.rename(source, dest); + assertIsDirectory(dest); + + // there are no markers + markerTool(fs, dest, false, 0); + LOG.info("Auditing destination paths"); + verifyRenamed(dest, createdPaths); + } + + /** + * Create a FS where only dir2 in the source tree keeps markers; + * verify all is good. 
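+ * <p>
+ * A sketch of the expected layout, assuming the directory names used
+ * in the test body:
+ * <pre>{@code
+ *   source/dir1/...   outside the authoritative path: markers deleted
+ *   source/dir2/...   authoritative path: markers retained
+ * }</pre>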
+ */ + @Test + public void testAuthPathIsMixed() throws Throwable { + describe("Create a source tree with mixed semantics"); + Path base = methodPath(); + Path source = new Path(base, "source"); + Path dest = new Path(base, "dest"); + Path dir2 = new Path(source, "dir2"); + S3AFileSystem mixedFSDir2 = createFS(DIRECTORY_MARKER_POLICY_AUTHORITATIVE, + dir2.toUri().toString()); + // line up for close in teardown + setMixedFS(mixedFSDir2); + // some of these paths will retain markers, some will not + CreatedPaths createdPaths = createPaths(mixedFSDir2, source); + + // markers are only under dir2 + markerTool(mixedFSDir2, toPath(source, "dir1"), false, 0); + markerTool(mixedFSDir2, source, false, expectedMarkersUnderDir2); + + // full scan of source will fail + markerTool(EXIT_NOT_ACCEPTABLE, + mixedFSDir2, source, false, 0, 0, false); + + // but add the -nonauth option and the markers under dir2 are skipped + markerTool(0, mixedFSDir2, source, false, 0, 0, true); + + // if we now rename, all will be good + LOG.info("Executing rename"); + mixedFSDir2.rename(source, dest); + assertIsDirectory(dest); + + // there are no markers + MarkerTool.ScanResult scanResult = markerTool(mixedFSDir2, dest, false, 0); + // there are exactly the files we want + Assertions.assertThat(scanResult) + .describedAs("Scan result %s", scanResult) + .extracting(s -> s.getTracker().getFilesFound()) + .isEqualTo(expectedFileCount); + verifyRenamed(dest, createdPaths); + } + + /** + * Assert that an expected number of markers were deleted. + * @param expected expected count. + * @param result scan result + */ + private static void assertMarkersDeleted(int expected, + MarkerTool.ScanResult result) { + + Assertions.assertThat(result.getPurgeSummary()) + .describedAs("Purge result of scan %s", result) + .isNotNull() + .extracting(f -> f.getMarkersDeleted()) + .isEqualTo(expected); + } + + /** + * Marker tool with no args. + */ + @Test + public void testRunNoArgs() throws Throwable { + runToFailure(EXIT_USAGE, MARKERS); + } + + @Test + public void testRunWrongBucket() throws Throwable { + runToFailure(EXIT_NOT_FOUND, MARKERS, + AUDIT, + "s3a://this-bucket-does-not-exist-hopefully"); + } + + /** + * Run with a path that doesn't exist. + */ + @Test + public void testRunUnknownPath() throws Throwable { + runToFailure(EXIT_NOT_FOUND, MARKERS, + AUDIT, + methodPath()); + } + + /** + * Having both -audit and -clean on the command line is an error. 
+ */ + @Test + public void testRunTooManyActions() throws Throwable { + runToFailure(EXIT_USAGE, MARKERS, + AUDIT, CLEAN, + methodPath()); + } + + @Test + public void testRunAuditWithExpectedMarkers() throws Throwable { + describe("Run a verbose audit expecting some markers"); + // a run under the keeping FS will create paths + CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath()); + final File audit = tempAuditFile(); + run(MARKERS, V, + AUDIT, + m(OPT_LIMIT), 0, + m(OPT_OUT), audit, + m(OPT_MIN), expectedMarkersWithBaseDir - 1, + m(OPT_MAX), expectedMarkersWithBaseDir + 1, + createdPaths.base); + expectMarkersInOutput(audit, expectedMarkersWithBaseDir); + } + + @Test + public void testRunAuditWithExpectedMarkersSwappedMinMax() throws Throwable { + describe("Run a verbose audit with the min/max ranges swapped;" + + " see HADOOP-17332"); + // a run under the keeping FS will create paths + CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath()); + final File audit = tempAuditFile(); + run(MARKERS, V, + AUDIT, + m(OPT_LIMIT), 0, + m(OPT_OUT), audit, + m(OPT_MIN), expectedMarkersWithBaseDir + 1, + m(OPT_MAX), expectedMarkersWithBaseDir - 1, + createdPaths.base); + expectMarkersInOutput(audit, expectedMarkersWithBaseDir); + } + + @Test + public void testRunAuditWithExcessMarkers() throws Throwable { + describe("Run a verbose audit failing as surplus markers were found"); + // a run under the keeping FS will create paths + CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath()); + final File audit = tempAuditFile(); + runToFailure(EXIT_NOT_ACCEPTABLE, MARKERS, V, + AUDIT, + m(OPT_OUT), audit, + createdPaths.base); + expectMarkersInOutput(audit, expectedMarkersWithBaseDir); + } + + @Test + public void testRunLimitedAudit() throws Throwable { + describe("Audit with a limited number of files (2)"); + CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath()); + runToFailure(EXIT_INTERRUPTED, + MARKERS, V, + m(OPT_LIMIT), 2, + CLEAN, + createdPaths.base); + } + + /** + * Run an audit against the landsat bucket. + *
    + * This tests paging/scale against a larger bucket without + * worrying about setup costs. + */ + @Test + public void testRunLimitedLandsatAudit() throws Throwable { + describe("Audit a few thousand landsat objects"); + final File audit = tempAuditFile(); + + runToFailure(EXIT_INTERRUPTED, + MARKERS, + AUDIT, + m(OPT_LIMIT), 3000, + m(OPT_OUT), audit, + LANDSAT_BUCKET); + readOutput(audit); + } + + @Test + public void testBucketInfoKeepingOnDeleting() throws Throwable { + describe("Run bucket info with the keeping config on the deleting fs"); + runS3GuardCommandToFailure(uncachedFSConfig(getDeletingFS()), + EXIT_NOT_ACCEPTABLE, + BUCKET_INFO, + m(MARKERS), DIRECTORY_MARKER_POLICY_KEEP, + methodPath()); + } + + @Test + public void testBucketInfoKeepingOnKeeping() throws Throwable { + describe("Run bucket info with the keeping config on the keeping fs"); + runS3GuardCommand(uncachedFSConfig(getKeepingFS()), + BUCKET_INFO, + m(MARKERS), DIRECTORY_MARKER_POLICY_KEEP, + methodPath()); + } + + @Test + public void testBucketInfoDeletingOnDeleting() throws Throwable { + describe("Run bucket info with the deleting config on the deleting fs"); + runS3GuardCommand(uncachedFSConfig(getDeletingFS()), + BUCKET_INFO, + m(MARKERS), DIRECTORY_MARKER_POLICY_DELETE, + methodPath()); + } + + @Test + public void testBucketInfoAuthOnAuth() throws Throwable { + describe("Run bucket info with the auth FS"); + Path base = methodPath(); + + S3AFileSystem authFS = createFS(DIRECTORY_MARKER_POLICY_AUTHORITATIVE, + base.toUri().toString()); + // line up for close in teardown + setMixedFS(authFS); + runS3GuardCommand(uncachedFSConfig(authFS), + BUCKET_INFO, + m(MARKERS), DIRECTORY_MARKER_POLICY_AUTHORITATIVE, + methodPath()); + } + + /** + * Tracker of created paths. + */ + private static final class CreatedPaths { + + private final FileSystem fs; + + private final Path base; + + private List files = new ArrayList<>(); + + private List dirs = new ArrayList<>(); + + private List emptyDirs = new ArrayList<>(); + + private List filesUnderBase = new ArrayList<>(); + + private List dirsUnderBase = new ArrayList<>(); + + private List emptyDirsUnderBase = new ArrayList<>(); + + /** + * Constructor. + * @param fs filesystem. + * @param base base directory for all creation operations. + */ + private CreatedPaths(final FileSystem fs, + final Path base) { + this.fs = fs; + this.base = base; + } + + /** + * Make a set of directories. + * @param names varargs list of paths under the base. + * @return number of entries created. + * @throws IOException failure + */ + private int dirs(String... names) throws IOException { + for (String name : names) { + mkdir(name); + } + return names.length; + } + + /** + * Create a single directory under the base. + * @param name name/relative names of the directory + * @return the path of the new entry. + */ + private Path mkdir(String name) throws IOException { + Path dir = toPath(base, name); + fs.mkdirs(dir); + dirs.add(dir); + dirsUnderBase.add(name); + return dir; + } + + /** + * Make a set of empty directories. + * @param names varargs list of paths under the base. + * @return number of entries created. + * @throws IOException failure + */ + private int emptydirs(String... names) throws IOException { + for (String name : names) { + emptydir(name); + } + return names.length; + } + + /** + * Create an empty directory. 
+ * @param name name under the base dir + * @return the path + * @throws IOException failure + */ + private Path emptydir(String name) throws IOException { + Path dir = toPath(base, name); + fs.mkdirs(dir); + emptyDirs.add(dir); + emptyDirsUnderBase.add(name); + return dir; + } + + /** + * Make a set of files. + * @param names varargs list of paths under the base. + * @return number of entries created. + * @throws IOException failure + */ + private int files(String... names) throws IOException { + for (String name : names) { + mkfile(name); + } + return names.length; + } + + /** + * Create a 0-byte file. + * @param name name under the base dir + * @return the path + * @throws IOException failure + */ + private Path mkfile(String name) + throws IOException { + Path file = toPath(base, name); + ContractTestUtils.touch(fs, file); + files.add(file); + filesUnderBase.add(name); + return file; + } + } + + /** + * Create the "standard" test paths. + * @param fs filesystem + * @param base base dir + * @return the details on what was created. + */ + private CreatedPaths createPaths(FileSystem fs, Path base) + throws IOException { + CreatedPaths r = new CreatedPaths(fs, base); + // the directories under which we will create files, + // so expect to have markers + r.mkdir(""); + + // create the empty dirs + r.emptydir("empty"); + + // dir 1 has a file underneath + r.mkdir("dir1"); + expectedFileCount = r.files("dir1/file1"); + + expectedMarkersUnderDir1 = 1; + + + // dir2 has a subdir + r.dirs("dir2", "dir2/dir3"); + // an empty subdir + r.emptydir("dir2/empty2"); + + // and a file under itself and dir3 + expectedFileCount += r.files( + "dir2/file2", + "dir2/dir3/file3"); + + + // wrap up the expectations. + expectedMarkersUnderDir2 = 2; + expectedMarkers = expectedMarkersUnderDir1 + expectedMarkersUnderDir2; + expectedMarkersWithBaseDir = expectedMarkers + 1; + return r; + } + + /** + * Verify that all the paths renamed from the source exist + * under the destination, including all empty directories. + * @param dest destination to look under. + * @param createdPaths list of created paths. + */ + void verifyRenamed(final Path dest, + final CreatedPaths createdPaths) throws IOException { + // all leaf directories exist + for (String p : createdPaths.emptyDirsUnderBase) { + assertIsDirectory(toPath(dest, p)); + } + // non-empty dirs + for (String p : createdPaths.dirsUnderBase) { + assertIsDirectory(toPath(dest, p)); + } + // all files exist + for (String p : createdPaths.filesUnderBase) { + assertIsFile(toPath(dest, p)); + } + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java new file mode 100644 index 0000000000000..02fec81513fca --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.tools; + +import java.io.File; + +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runners.MethodSorters; + +import org.apache.hadoop.fs.Path; + +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.AUDIT; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.CLEAN; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.MARKERS; +import static org.apache.hadoop.fs.s3a.tools.MarkerTool.OPT_OUT; + +/** + * Marker tool tests against the root FS; run in the sequential phase. + */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class ITestMarkerToolRootOperations extends AbstractMarkerToolTest { + + private Path rootPath; + + @Override + public void setup() throws Exception { + super.setup(); + rootPath = getFileSystem().makeQualified(new Path("/")); + } + + @Test + public void test_100_audit_root_noauth() throws Throwable { + describe("Run a verbose audit"); + final File audit = tempAuditFile(); + run(MARKERS, V, + AUDIT, + m(OPT_OUT), audit, + rootPath); + readOutput(audit); + } + + @Test + public void test_200_clean_root() throws Throwable { + describe("Clean the root path"); + final File audit = tempAuditFile(); + run(MARKERS, V, + CLEAN, + m(OPT_OUT), audit, + rootPath); + readOutput(audit); + } + +} diff --git a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml index f6b0e406b3bb2..a5d98a32e6fc2 100644 --- a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml +++ b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml @@ -48,10 +48,25 @@ - fs.contract.rename-returns-false-if-source-missing + fs.contract.rename-creates-dest-dirs true + + fs.contract.rename-returns-false-if-source-missing + false + + + + fs.contract.rename-overwrites-dest + false + + + + fs.contract.rename-returns-false-if-dest-exists + false + + fs.contract.rename-remove-dest-if-empty-dir true diff --git a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties index 6e20fbcda7efd..e8f3691caa6f0 100644 --- a/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties +++ b/hadoop-tools/hadoop-aws/src/test/resources/log4j.properties @@ -75,3 +75,11 @@ log4j.logger.org.apache.hadoop.fs.s3a.s3guard.Operations=DEBUG log4j.logger.org.apache.hadoop.mapreduce.lib.output=DEBUG log4j.logger.org.apache.hadoop.fs.s3a.S3AStorageStatistics=INFO + +# Set to debug if you need to debug S3A endpoint problems. 
+#log4j.logger.org.apache.hadoop.fs.s3a.DefaultS3ClientFactory=DEBUG + +# This causes all remote iterator stats +# to be logged when the RemoteIterators.foreach() method is +# invoked +#log4j.logger.org.apache.hadoop.util.functional.RemoteIterators=DEBUG diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml index 9952861a2f97d..6e98e6ea4cb46 100644 --- a/hadoop-tools/hadoop-azure-datalake/pom.xml +++ b/hadoop-tools/hadoop-azure-datalake/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-azure-datalake diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java index 278b815782aa8..75d90c78839ca 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java @@ -26,8 +26,8 @@ import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import com.microsoft.azure.datalake.store.ADLStoreClient; import com.microsoft.azure.datalake.store.ADLStoreOptions; import com.microsoft.azure.datalake.store.DirectoryEntry; diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFsOutputStream.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFsOutputStream.java index 2b89fb0a73242..dd4495319d670 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFsOutputStream.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFsOutputStream.java @@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; import java.io.IOException; @@ -42,7 +44,8 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public final class AdlFsOutputStream extends OutputStream implements Syncable { +public final class AdlFsOutputStream extends OutputStream + implements Syncable, StreamCapabilities { private final ADLFileOutputStream out; public AdlFsOutputStream(ADLFileOutputStream out, Configuration configuration) @@ -79,4 +82,9 @@ public synchronized void hflush() throws IOException { public synchronized void hsync() throws IOException { out.flush(); } + + @Override + public boolean hasCapability(String capability) { + return StoreImplementationUtils.isProbeForSyncable(capability); + } } diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java index 3e3a010e17484..f684629b5d20b 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractRenameLive.java @@ 
-19,13 +19,9 @@ package org.apache.hadoop.fs.adl.live; -import org.junit.Test; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractRenameTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.test.LambdaTestUtils; /** * Test rename contract test cases on Adl file system. @@ -37,14 +33,4 @@ protected AbstractFSContract createContract(Configuration configuration) { return new AdlStorageContract(configuration); } - /** - * ADL throws an Access Control Exception rather than return false. - * This is caught and its error text checked, to catch regressions. - */ - @Test - public void testRenameFileUnderFile() throws Exception { - LambdaTestUtils.intercept(AccessControlException.class, - "Parent path is not a folder.", - super::testRenameFileUnderFile); - } } diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml b/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml index 5bbdd6fbb8645..4f5c99fbe0ca3 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml +++ b/hadoop-tools/hadoop-azure-datalake/src/test/resources/adls.xml @@ -38,6 +38,11 @@ true + + fs.contract.rename-returns-false-if-dest-exists + true + + fs.contract.test.random-seek-count 10 @@ -148,4 +153,14 @@ true + + fs.contract.supports-hflush + true + + + + fs.contract.supports-hsync + true + + diff --git a/hadoop-tools/hadoop-azure/.gitignore b/hadoop-tools/hadoop-azure/.gitignore index 837b481682ad2..0e17efaa1eb24 100644 --- a/hadoop-tools/hadoop-azure/.gitignore +++ b/hadoop-tools/hadoop-azure/.gitignore @@ -1,2 +1,5 @@ .checkstyle -bin/ \ No newline at end of file +bin/ +src/test/resources/combinationConfigFiles +src/test/resources/abfs-combination-test-configs.xml +dev-support/testlogs diff --git a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml index 38de35e897a0d..b750b8b91c79e 100644 --- a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml +++ b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml @@ -15,6 +15,23 @@ limitations under the License. --> + + + + + + + + + + + + + + @@ -24,7 +41,7 @@ + and helps performance. --> @@ -40,7 +57,7 @@ + method. --> @@ -57,4 +74,13 @@ + + + + + + + + diff --git a/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh b/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh new file mode 100755 index 0000000000000..d3d40621accc7 --- /dev/null +++ b/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/runtests.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2034 +# unused variables are global in nature and used in testsupport.sh + +set -eo pipefail + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# shellcheck disable=SC1091 +. dev-support/testrun-scripts/testsupport.sh + +begin + +### ADD THE TEST COMBINATIONS BELOW. DO NOT EDIT THE ABOVE LINES. + + +combination=HNS-OAuth +properties=("fs.azure.abfs.account.name" "fs.azure.test.namespace.enabled" +"fs.azure.account.auth.type") +values=("{account name}.dfs.core.windows.net" "true" "OAuth") +generateconfigs + +combination=HNS-SharedKey +properties=("fs.azure.abfs.account.name" "fs.azure.test.namespace.enabled" "fs.azure.account.auth.type") +values=("{account name}.dfs.core.windows.net" "true" "SharedKey") +generateconfigs + +combination=NonHNS-SharedKey +properties=("fs.azure.abfs.account.name" "fs.azure.test.namespace.enabled" "fs.azure.account.auth.type") +values=("{account name}.dfs.core.windows.net" "false" "SharedKey") +generateconfigs + + +### DO NOT EDIT THE LINES BELOW. + +runtests "$@" diff --git a/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/testsupport.sh b/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/testsupport.sh new file mode 100644 index 0000000000000..5beb8b6df3a45 --- /dev/null +++ b/hadoop-tools/hadoop-azure/dev-support/testrun-scripts/testsupport.sh @@ -0,0 +1,242 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +testresourcesdir=src/test/resources +combconfsdir=$testresourcesdir/combinationConfigFiles +combtestfile=$testresourcesdir/abfs-combination-test-configs.xml + +logdir=dev-support/testlogs +testresultsregex="Results:(\n|.)*?Tests run:" +testresultsfilename= +starttime= +threadcount= +defaultthreadcount=8 + +properties= +values= + +validate() { + if [ -z "$threadcount" ] ; then + threadcount=$defaultthreadcount + fi + numberegex='^[0-9]+$' + if ! [[ $threadcount =~ $numberegex ]] ; then + echo "Exiting. The script param (threadcount) should be a number" + exit -1 + fi + if [ -z "$combination" ]; then + echo "Exiting. combination cannot be empty" + exit -1 + fi + propertiessize=${#properties[@]} + valuessize=${#values[@]} + if [ "$propertiessize" -lt 1 ] || [ "$valuessize" -lt 1 ] || [ "$propertiessize" -ne "$valuessize" ]; then + echo "Exiting. Both properties and values arrays has to be populated and of same size. Please check for combination $combination" + exit -1 + fi + + for filename in "${combinations[@]}"; do + if [[ ! -f "$combconfsdir/$filename.xml" ]]; then + echo "Exiting. Combination config file ($combconfsdir/$combination.xml) does not exist." + exit -1 + fi + done +} + +checkdependencies() { + if ! [ "$(command -v pcregrep)" ]; then + echo "Exiting. pcregrep is required to run the script." + exit -1 + fi + if ! [ "$(command -v xmlstarlet)" ]; then + echo "Exiting. xmlstarlet is required to run the script." 
+ exit -1 + fi +} + +cleancombinationconfigs() { + rm -rf $combconfsdir + mkdir -p $combconfsdir +} + +generateconfigs() { + combconffile="$combconfsdir/$combination.xml" + rm -rf "$combconffile" + cat > "$combconffile" << ENDOFFILE + + + +ENDOFFILE + + propertiessize=${#properties[@]} + valuessize=${#values[@]} + if [ "$propertiessize" -ne "$valuessize" ]; then + echo "Exiting. Number of properties and values differ for $combination" + exit -1 + fi + for ((i = 0; i < propertiessize; i++)); do + key=${properties[$i]} + val=${values[$i]} + changeconf "$key" "$val" + done + formatxml "$combconffile" +} + +formatxml() { + xmlstarlet fo -s 2 "$1" > "$1.tmp" + mv "$1.tmp" "$1" +} + +setactiveconf() { + if [[ ! -f "$combconfsdir/$combination.xml" ]]; then + echo "Exiting. Combination config file ($combconfsdir/$combination.xml) does not exist." + exit -1 + fi + rm -rf $combtestfile + cat > $combtestfile << ENDOFFILE + + + +ENDOFFILE + xmlstarlet ed -P -L -s /configuration -t elem -n include -v "" $combtestfile + xmlstarlet ed -P -L -i /configuration/include -t attr -n href -v "combinationConfigFiles/$combination.xml" $combtestfile + xmlstarlet ed -P -L -i /configuration/include -t attr -n xmlns -v "http://www.w3.org/2001/XInclude" $combtestfile + formatxml $combtestfile +} + +changeconf() { + xmlstarlet ed -P -L -d "/configuration/property[name='$1']" "$combconffile" + xmlstarlet ed -P -L -s /configuration -t elem -n propertyTMP -v "" -s /configuration/propertyTMP -t elem -n name -v "$1" -r /configuration/propertyTMP -v property "$combconffile" + if ! xmlstarlet ed -P -L -s "/configuration/property[name='$1']" -t elem -n value -v "$2" "$combconffile" + then + echo "Exiting. Changing config property failed." + exit -1 + fi +} + +summary() { + { + echo "" + echo "$combination" + echo "========================" + pcregrep -M "$testresultsregex" "$testlogfilename" + } >> "$testresultsfilename" + printf "\n----- Test results -----\n" + pcregrep -M "$testresultsregex" "$testlogfilename" + + secondstaken=$((ENDTIME - STARTTIME)) + mins=$((secondstaken / 60)) + secs=$((secondstaken % 60)) + printf "\nTime taken: %s mins %s secs.\n" "$mins" "$secs" + echo "Find test logs for the combination ($combination) in: $testlogfilename" + echo "Find consolidated test results in: $testresultsfilename" + echo "----------" +} + +init() { + checkdependencies + if ! mvn clean install -DskipTests + then + echo "" + echo "Exiting. Build failed." + exit -1 + fi + starttime=$(date +"%Y-%m-%d_%H-%M-%S") + mkdir -p "$logdir" + testresultsfilename="$logdir/$starttime/Test-Results.txt" + if [[ -z "$combinations" ]]; then + combinations=( $( ls $combconfsdir/*.xml )) + fi +} + +runtests() { + parseoptions "$@" + validate + if [ -z "$starttime" ]; then + init + fi + shopt -s nullglob + for combconffile in "${combinations[@]}"; do + STARTTIME=$(date +%s) + combination=$(basename "$combconffile" .xml) + mkdir -p "$logdir/$starttime" + testlogfilename="$logdir/$starttime/Test-Logs-$combination.txt" + printf "\nRunning the combination: %s..." 
"$combination" + setactiveconf + mvn -T 1C -Dparallel-tests=abfs -Dscale -DtestsThreadCount=$threadcount verify >> "$testlogfilename" || true + ENDTIME=$(date +%s) + summary + done +} + +begin() { + cleancombinationconfigs +} + +parseoptions() { +runactivate=0 +runtests=0 + while getopts ":c:a:t:" option; do + case "${option}" in + a) + if [[ "$runactivate" -eq "1" ]]; then + echo "-a Option is not multivalued" + exit 1 + fi + runactivate=1 + combination=$(basename "$OPTARG" .xml) + ;; + c) + runtests=1 + combination=$(basename "$OPTARG" .xml) + combinations+=("$combination") + ;; + t) + threadcount=$OPTARG + ;; + *|?|h) + # shellcheck disable=SC2128 + if [[ -z "$combinations" ]]; then + combinations=( $( ls $combconfsdir/*.xml )) + fi + combstr="" + for combconffile in "${combinations[@]}"; do + combname=$(basename "$combconffile" .xml) + combstr="${combname}, ${combstr}" + done + combstr=${combstr:0:-2} + + echo "Usage: $0 [-n] [-a COMBINATION_NAME] [-c COMBINATION_NAME] [-t THREAD_COUNT]" + echo "" + echo "Where:" + echo " -a COMBINATION_NAME Specify the combination name which needs to be activated." + echo " Configured combinations: ${combstr}" + echo " -c COMBINATION_NAME Specify the combination name for test runs" + echo " -t THREAD_COUNT Specify the thread count" + exit 1 + ;; + esac + done + if [[ "$runactivate" -eq "1" && "$runtests" -eq "1" ]]; then + echo "Both activate (-a option) and test run combinations (-c option) cannot be specified together" + exit 1 + fi + if [[ "$runactivate" -eq "1" ]]; then + setactiveconf + exit 0 + fi +} diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index ff008125031f0..d679e29fe42af 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-azure @@ -50,10 +50,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml @@ -169,8 +168,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava @@ -194,7 +193,7 @@ org.wildfly.openssl wildfly-openssl - runtime + compile @@ -303,31 +302,13 @@ - maven-antrun-plugin + org.apache.hadoop + hadoop-maven-plugins - create-parallel-tests-dirs - test-compile - - - - - + parallel-tests-createdir - run + parallel-tests-createdir @@ -342,7 +323,6 @@ test - 1 ${testsThreadCount} false ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true @@ -408,7 +388,6 @@ verify - 1 ${testsThreadCount} false ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true @@ -486,31 +465,13 @@ - maven-antrun-plugin + org.apache.hadoop + hadoop-maven-plugins - create-parallel-tests-dirs - test-compile - - - - - + parallel-tests-createdir - run + parallel-tests-createdir @@ -593,6 +554,7 @@ **/azurebfs/ITestAbfsReadWriteAndSeek.java **/azurebfs/ITestAzureBlobFileSystemListStatus.java **/azurebfs/extensions/ITestAbfsDelegationTokens.java + **/azurebfs/ITestSmallWriteOptimization.java @@ -632,6 +594,7 @@ **/azurebfs/ITestAbfsReadWriteAndSeek.java **/azurebfs/ITestAzureBlobFileSystemListStatus.java **/azurebfs/extensions/ITestAbfsDelegationTokens.java + **/azurebfs/ITestSmallWriteOptimization.java @@ -652,31 +615,13 @@ - maven-antrun-plugin + org.apache.hadoop + hadoop-maven-plugins - create-parallel-tests-dirs - test-compile - - - - - + parallel-tests-createdir - run + parallel-tests-createdir @@ -691,7 +636,6 @@ test - 1 
${testsThreadCount} false ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true diff --git a/hadoop-tools/hadoop-azure/src/config/checkstyle-suppressions.xml b/hadoop-tools/hadoop-azure/src/config/checkstyle-suppressions.xml index c50236162d803..070c8c1fe827a 100644 --- a/hadoop-tools/hadoop-azure/src/config/checkstyle-suppressions.xml +++ b/hadoop-tools/hadoop-azure/src/config/checkstyle-suppressions.xml @@ -46,4 +46,6 @@ files="org[\\/]apache[\\/]hadoop[\\/]fs[\\/]azurebfs[\\/]AzureBlobFileSystemStore.java"/> + diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java index a9be8c5a75e8c..f1f8d3d5003c6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java @@ -30,7 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; class AzureFileSystemThreadPoolExecutor { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java index 414d2f2ee098a..c613468536b6c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java @@ -64,7 +64,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java index 5f051effefb9a..5412c0544061b 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java @@ -27,7 +27,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.Locale; import java.util.UUID; import java.util.Random; import java.util.concurrent.ConcurrentLinkedDeque; @@ -42,8 +41,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSExceptionMessages; @@ -551,13 +551,7 @@ public boolean hasCapability(String capability) { if (!compactionEnabled) { return false; } - switch (capability.toLowerCase(Locale.ENGLISH)) { - case StreamCapabilities.HSYNC: - case 
StreamCapabilities.HFLUSH: - return true; - default: - return false; - } + return StoreImplementationUtils.isProbeForSyncable(capability); } /** diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java index 016ae745c5688..5fa0ad8ae34e8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.fs.azure; -import com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; import org.apache.hadoop.conf.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.TimeUnit; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; /** * Class that provides caching for Authorize and getSasUri calls diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java index 859a608a1e1e7..48ffa65f69590 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java @@ -18,8 +18,8 @@ package org.apache.hadoop.fs.azure; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index f5705283b512d..48ef495d7b7ef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -70,6 +70,7 @@ import org.apache.hadoop.fs.azure.security.Constants; import org.apache.hadoop.fs.azure.security.RemoteWasbDelegationTokenManager; import org.apache.hadoop.fs.azure.security.WasbDelegationTokenManager; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; @@ -81,7 +82,7 @@ import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,7 +90,7 @@ import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.microsoft.azure.storage.StorageException; /** @@ -1052,10 +1053,7 @@ public void hsync() throws IOException { */ @Override // 
StreamCapability public boolean hasCapability(String capability) { - if (out instanceof StreamCapabilities) { - return ((StreamCapabilities) out).hasCapability(capability); - } - return false; + return StoreImplementationUtils.hasCapability(out, capability); } @Override diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java index e4ad70cedb2cf..c918518bff24a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java @@ -23,7 +23,7 @@ import java.net.HttpURLConnection; import java.util.Map; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java index 414a01115c1d8..0944b1b0987c1 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation; import org.apache.hadoop.fs.permission.PermissionStatus; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** *
    diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java index 591c2ec50dee7..1e409cd908fa8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java @@ -43,7 +43,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.BlobRequestOptions; @@ -580,6 +580,7 @@ public synchronized void hsync() throws IOException { // Restore the interrupted status Thread.currentThread().interrupt(); } + checkStreamState(); LOG.debug("Leaving PageBlobOutputStream#hsync(). Total hsync duration = " + (System.currentTimeMillis() - start) + " msec."); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java index 76ced3b96da5d..33ae9b83adea9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azure.security.Constants; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java index 10956f73f729a..200945f6b533e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java @@ -22,7 +22,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.StorageException; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java index dcfff2fbe3784..f8aed2612a857 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java @@ -22,9 +22,13 @@ import java.io.IOException; import java.io.OutputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; 
import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; -import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.impl.StoreImplementationUtils; /** * Support the Syncable interface on top of a DataOutputStream. @@ -35,6 +39,8 @@ public class SyncableDataOutputStream extends DataOutputStream implements Syncable, StreamCapabilities { + private static final Logger LOG = LoggerFactory.getLogger(SyncableDataOutputStream.class); + public SyncableDataOutputStream(OutputStream out) { super(out); } @@ -51,10 +57,7 @@ public OutputStream getOutStream() { @Override public boolean hasCapability(String capability) { - if (out instanceof StreamCapabilities) { - return ((StreamCapabilities) out).hasCapability(capability); - } - return false; + return StoreImplementationUtils.hasCapability(out, capability); } @Override @@ -70,4 +73,34 @@ public void hsync() throws IOException { ((Syncable) out).hsync(); } } + + @Override + public void close() throws IOException { + IOException ioeFromFlush = null; + try { + flush(); + } catch (IOException e) { + ioeFromFlush = e; + throw e; + } finally { + try { + this.out.close(); + } catch (IOException e) { + // If there was an Exception during flush(), the Azure SDK will throw back the + // same when we call close on the same stream. When try and finally both throw + // Exception, Java will use Throwable#addSuppressed for one of the Exception so + // that the caller will get one exception back. When within this, if both + // Exceptions are equal, it will throw back IllegalStateException. This makes us + // to throw back a non IOE. The below special handling is to avoid this. + if (ioeFromFlush == e) { + // Do nothing.. + // The close() call gave back the same IOE which flush() gave. Just swallow it + LOG.debug("flush() and close() throwing back same Exception. Just swallowing the latter", e); + } else { + // Let Java handle 2 different Exceptions been thrown from try and finally. 
+ throw e; + } + } + } + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java index f512489a8ae90..d819ede67c466 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbFsck.java @@ -33,7 +33,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * An fsck tool implementation for WASB that does various admin/cleanup/recovery diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java index 606c3f040f8fe..e595a7972a0c3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.azure.security.Constants; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.http.Header; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java index 61fe3d8d6d22f..193be48029a34 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java @@ -20,10 +20,9 @@ import java.io.IOException; import java.lang.reflect.Field; -import java.util.Map; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; @@ -47,6 +46,7 @@ import org.apache.hadoop.fs.azurebfs.diagnostics.IntegerConfigurationBasicValidator; import org.apache.hadoop.fs.azurebfs.diagnostics.LongConfigurationBasicValidator; import org.apache.hadoop.fs.azurebfs.diagnostics.StringConfigurationBasicValidator; +import org.apache.hadoop.fs.azurebfs.enums.Trilean; import org.apache.hadoop.fs.azurebfs.extensions.CustomTokenProviderAdaptee; import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider; import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; @@ -57,6 +57,7 @@ import org.apache.hadoop.fs.azurebfs.oauth2.UserPasswordTokenProvider; import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager; import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.fs.azurebfs.services.ExponentialRetryPolicy; import org.apache.hadoop.fs.azurebfs.services.KeyProvider; import org.apache.hadoop.fs.azurebfs.services.SimpleKeyProvider; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; @@ -81,12 +82,38 @@ public class AbfsConfiguration{ private final boolean isSecure; private static final Logger LOG = 
LoggerFactory.getLogger(AbfsConfiguration.class); + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ACCOUNT_IS_HNS_ENABLED, + DefaultValue = DEFAULT_FS_AZURE_ACCOUNT_IS_HNS_ENABLED) + private String isNamespaceEnabledAccount; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_WRITE_MAX_CONCURRENT_REQUESTS, + DefaultValue = -1) + private int writeMaxConcurrentRequestCount; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, + DefaultValue = -1) + private int maxWriteRequestsToQueue; + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_WRITE_BUFFER_SIZE, MinValue = MIN_BUFFER_SIZE, MaxValue = MAX_BUFFER_SIZE, DefaultValue = DEFAULT_WRITE_BUFFER_SIZE) private int writeBufferSize; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION, + DefaultValue = DEFAULT_AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION) + private boolean enableSmallWriteOptimization; + + @BooleanConfigurationValidatorAnnotation( + ConfigurationKey = AZURE_READ_SMALL_FILES_COMPLETELY, + DefaultValue = DEFAULT_READ_SMALL_FILES_COMPLETELY) + private boolean readSmallFilesCompletely; + + @BooleanConfigurationValidatorAnnotation( + ConfigurationKey = AZURE_READ_OPTIMIZE_FOOTER_READ, + DefaultValue = DEFAULT_OPTIMIZE_FOOTER_READ) + private boolean optimizeFooterRead; + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_READ_BUFFER_SIZE, MinValue = MIN_BUFFER_SIZE, MaxValue = MAX_BUFFER_SIZE, @@ -110,6 +137,31 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_MAX_RETRY_ATTEMPTS) private int maxIoRetries; + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_CUSTOM_TOKEN_FETCH_RETRY_COUNT, + MinValue = 0, + DefaultValue = DEFAULT_CUSTOM_TOKEN_FETCH_RETRY_COUNT) + private int customTokenFetchRetryCount; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_COUNT, + MinValue = 0, + DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_ATTEMPTS) + private int oauthTokenFetchRetryCount; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF, + MinValue = 0, + DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF_INTERVAL) + private int oauthTokenFetchRetryMinBackoff; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF, + MinValue = 0, + DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF_INTERVAL) + private int oauthTokenFetchRetryMaxBackoff; + + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF, + MinValue = 0, + DefaultValue = DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF) + private int oauthTokenFetchRetryDeltaBackoff; + @LongConfigurationValidatorAnnotation(ConfigurationKey = AZURE_BLOCK_SIZE_PROPERTY_NAME, MinValue = 0, MaxValue = MAX_AZURE_BLOCK_SIZE, @@ -143,6 +195,14 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_FS_AZURE_ATOMIC_RENAME_DIRECTORIES) private String azureAtomicDirs; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE, + DefaultValue = DEFAULT_FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE) + private boolean enableConditionalCreateOverwrite; + + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_APPEND_BLOB_KEY, + DefaultValue = DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES) + private String azureAppendBlobDirs; + 
@BooleanConfigurationValidatorAnnotation(ConfigurationKey = AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, DefaultValue = DEFAULT_AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION) private boolean createRemoteFileSystemDuringInitialization; @@ -155,6 +215,16 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_READ_AHEAD_QUEUE_DEPTH) private int readAheadQueueDepth; + @IntegerConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_READ_AHEAD_BLOCK_SIZE, + MinValue = MIN_BUFFER_SIZE, + MaxValue = MAX_BUFFER_SIZE, + DefaultValue = DEFAULT_READ_AHEAD_BLOCK_SIZE) + private int readAheadBlockSize; + + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ALWAYS_READ_BUFFER_SIZE, + DefaultValue = DEFAULT_ALWAYS_READ_BUFFER_SIZE) + private boolean alwaysReadBufferSize; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ENABLE_FLUSH, DefaultValue = DEFAULT_ENABLE_FLUSH) private boolean enableFlush; @@ -168,9 +238,17 @@ public class AbfsConfiguration{ private boolean enableAutoThrottling; @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_USER_AGENT_PREFIX_KEY, - DefaultValue = "") + DefaultValue = DEFAULT_FS_AZURE_USER_AGENT_PREFIX) private String userAgentId; + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_CLUSTER_NAME, + DefaultValue = DEFAULT_VALUE_UNKNOWN) + private String clusterName; + + @StringConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_CLUSTER_TYPE, + DefaultValue = DEFAULT_VALUE_UNKNOWN) + private String clusterType; + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ENABLE_DELEGATION_TOKEN, DefaultValue = DEFAULT_ENABLE_DELEGATION_TOKEN) private boolean enableDelegationToken; @@ -192,7 +270,14 @@ public class AbfsConfiguration{ DefaultValue = DEFAULT_ABFS_LATENCY_TRACK) private boolean trackLatency; - private Map storageAccountKeys; + @LongConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS, + MinValue = 0, + DefaultValue = DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS) + private long sasTokenRenewPeriodForStreamsInSeconds; + + @BooleanConfigurationValidatorAnnotation(ConfigurationKey = + FS_AZURE_ENABLE_ABFS_LIST_ITERATOR, DefaultValue = DEFAULT_ENABLE_ABFS_LIST_ITERATOR) + private boolean enableAbfsListIterator; public AbfsConfiguration(final Configuration rawConfig, String accountName) throws IllegalAccessException, InvalidConfigurationValueException, IOException { @@ -201,7 +286,6 @@ public AbfsConfiguration(final Configuration rawConfig, String accountName) this.accountName = accountName; this.isSecure = getBoolean(FS_AZURE_SECURE_MODE, false); - validateStorageAccountKeys(); Field[] fields = this.getClass().getDeclaredFields(); for (Field field : fields) { field.setAccessible(true); @@ -219,6 +303,10 @@ public AbfsConfiguration(final Configuration rawConfig, String accountName) } } + public Trilean getIsNamespaceEnabledAccount() { + return Trilean.getTrilean(isNamespaceEnabledAccount); + } + /** * Gets the Azure Storage account name corresponding to this instance of configuration. * @return the Azure Storage account name @@ -298,31 +386,91 @@ public String getPasswordString(String key) throws IOException { } /** - * Returns the account-specific Class if it exists, then looks for an - * account-agnostic value, and finally tries the default value. 
+ * Returns account-specific token provider class if it exists, else checks if + * an account-agnostic setting is present for token provider class if AuthType + * matches with authType passed. + * @param authType AuthType effective on the account * @param name Account-agnostic configuration key * @param defaultValue Class returned if none is configured * @param xface Interface shared by all possible values + * @param Interface class type * @return Highest-precedence Class object that was found */ - public Class getClass(String name, Class defaultValue, Class xface) { + public Class getTokenProviderClass(AuthType authType, + String name, + Class defaultValue, + Class xface) { + Class tokenProviderClass = getAccountSpecificClass(name, defaultValue, + xface); + + // If there is none set specific for account + // fall back to generic setting if Auth Type matches + if ((tokenProviderClass == null) + && (authType == getAccountAgnosticEnum( + FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey))) { + tokenProviderClass = getAccountAgnosticClass(name, defaultValue, xface); + } + + return (tokenProviderClass == null) + ? null + : tokenProviderClass.asSubclass(xface); + } + + /** + * Returns the account-specific class if it exists, else returns default value. + * @param name Account-agnostic configuration key + * @param defaultValue Class returned if none is configured + * @param xface Interface shared by all possible values + * @param Interface class type + * @return Account specific Class object that was found + */ + public Class getAccountSpecificClass(String name, + Class defaultValue, + Class xface) { return rawConfig.getClass(accountConf(name), - rawConfig.getClass(name, defaultValue, xface), + defaultValue, xface); } /** - * Returns the account-specific password in string form if it exists, then + * Returns account-agnostic Class if it exists, else returns the default value. + * @param name Account-agnostic configuration key + * @param defaultValue Class returned if none is configured + * @param xface Interface shared by all possible values + * @param Interface class type + * @return Account-Agnostic Class object that was found + */ + public Class getAccountAgnosticClass(String name, + Class defaultValue, + Class xface) { + return rawConfig.getClass(name, defaultValue, xface); + } + + /** + * Returns the account-specific enum value if it exists, then * looks for an account-agnostic value. * @param name Account-agnostic configuration key * @param defaultValue Value returned if none is configured - * @return value in String form if one exists, else null + * @param Enum type + * @return enum value if one exists, else null */ public > T getEnum(String name, T defaultValue) { return rawConfig.getEnum(accountConf(name), rawConfig.getEnum(name, defaultValue)); } + /** + * Returns the account-agnostic enum value if it exists, else + * return default. + * @param name Account-agnostic configuration key + * @param defaultValue Value returned if none is configured + * @param Enum type + * @return enum value if one exists, else null + */ + public > T getAccountAgnosticEnum(String name, T defaultValue) { + return rawConfig.getEnum(name, defaultValue); + } + /** * Unsets parameter in the underlying Configuration object. * Provided only as a convenience; does not add any account logic. 
@@ -397,6 +545,18 @@ public int getWriteBufferSize() { return this.writeBufferSize; } + public boolean isSmallWriteOptimizationEnabled() { + return this.enableSmallWriteOptimization; + } + + public boolean readSmallFilesCompletely() { + return this.readSmallFilesCompletely; + } + + public boolean optimizeFooterRead() { + return this.optimizeFooterRead; + } + public int getReadBufferSize() { return this.readBufferSize; } @@ -417,6 +577,10 @@ public int getMaxIoRetries() { return this.maxIoRetries; } + public int getCustomTokenFetchRetryCount() { + return this.customTokenFetchRetryCount; + } + public long getAzureBlockSize() { return this.azureBlockSize; } @@ -425,6 +589,10 @@ public boolean isCheckAccessEnabled() { return this.isCheckAccessEnabled; } + public long getSasTokenRenewPeriodForStreamsInSeconds() { + return this.sasTokenRenewPeriodForStreamsInSeconds; + } + public String getAzureBlockLocationHost() { return this.azureBlockLocationHost; } @@ -449,6 +617,14 @@ public String getAzureAtomicRenameDirs() { return this.azureAtomicDirs; } + public boolean isConditionalCreateOverwriteEnabled() { + return this.enableConditionalCreateOverwrite; + } + + public String getAppendBlobDirs() { + return this.azureAppendBlobDirs; + } + public boolean getCreateRemoteFileSystemDuringInitialization() { // we do not support creating the filesystem when AuthType is SAS return this.createRemoteFileSystemDuringInitialization @@ -463,6 +639,14 @@ public int getReadAheadQueueDepth() { return this.readAheadQueueDepth; } + public int getReadAheadBlockSize() { + return this.readAheadBlockSize; + } + + public boolean shouldReadBufferSizeAlways() { + return this.alwaysReadBufferSize; + } + public boolean isFlushEnabled() { return this.enableFlush; } @@ -479,6 +663,14 @@ public String getCustomUserAgentPrefix() { return this.userAgentId; } + public String getClusterName() { + return this.clusterName; + } + + public String getClusterType() { + return this.clusterType; + } + public DelegatingSSLSocketFactory.SSLChannelMode getPreferredSSLFactoryOption() { return getEnum(FS_AZURE_SSL_CHANNEL_MODE_KEY, DEFAULT_FS_AZURE_SSL_CHANNEL_MODE); } @@ -517,8 +709,10 @@ public AccessTokenProvider getTokenProvider() throws TokenAccessProviderExceptio if (authType == AuthType.OAuth) { try { Class tokenProviderClass = - getClass(FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME, null, - AccessTokenProvider.class); + getTokenProviderClass(authType, + FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME, null, + AccessTokenProvider.class); + AccessTokenProvider tokenProvider = null; if (tokenProviderClass == ClientCredsTokenProvider.class) { String authEndpoint = getPasswordString(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT); @@ -561,14 +755,17 @@ public AccessTokenProvider getTokenProvider() throws TokenAccessProviderExceptio } catch(IllegalArgumentException e) { throw e; } catch (Exception e) { - throw new TokenAccessProviderException("Unable to load key provider class.", e); + throw new TokenAccessProviderException("Unable to load OAuth token provider class.", e); } } else if (authType == AuthType.Custom) { try { String configKey = FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME; - Class customTokenProviderClass = - getClass(configKey, null, CustomTokenProviderAdaptee.class); + + Class customTokenProviderClass + = getTokenProviderClass(authType, configKey, null, + CustomTokenProviderAdaptee.class); + if (customTokenProviderClass == null) { throw new IllegalArgumentException( String.format("The configuration value for \"%s\" is 
invalid.", configKey)); @@ -581,7 +778,7 @@ public AccessTokenProvider getTokenProvider() throws TokenAccessProviderExceptio LOG.trace("Initializing {}", customTokenProviderClass.getName()); azureTokenProvider.initialize(rawConfig, accountName); LOG.trace("{} init complete", customTokenProviderClass.getName()); - return new CustomTokenProviderAdapter(azureTokenProvider); + return new CustomTokenProviderAdapter(azureTokenProvider, getCustomTokenFetchRetryCount()); } catch(IllegalArgumentException e) { throw e; } catch (Exception e) { @@ -604,7 +801,9 @@ public SASTokenProvider getSASTokenProvider() throws AzureBlobFileSystemExceptio try { String configKey = FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; Class sasTokenProviderClass = - getClass(configKey, null, SASTokenProvider.class); + getTokenProviderClass(authType, configKey, null, + SASTokenProvider.class); + Preconditions.checkArgument(sasTokenProviderClass != null, String.format("The configuration value for \"%s\" is invalid.", configKey)); @@ -622,16 +821,6 @@ public SASTokenProvider getSASTokenProvider() throws AzureBlobFileSystemExceptio } } - void validateStorageAccountKeys() throws InvalidConfigurationValueException { - Base64StringConfigurationBasicValidator validator = new Base64StringConfigurationBasicValidator( - FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME, "", true); - this.storageAccountKeys = rawConfig.getValByRegex(FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME_REGX); - - for (Map.Entry account : storageAccountKeys.entrySet()) { - validator.validate(account.getValue()); - } - } - int validateInt(Field field) throws IllegalAccessException, InvalidConfigurationValueException { IntegerConfigurationValidatorAnnotation validator = field.getAnnotation(IntegerConfigurationValidatorAnnotation.class); String value = get(validator.ConfigurationKey()); @@ -691,6 +880,30 @@ boolean validateBoolean(Field field) throws IllegalAccessException, InvalidConfi validator.ThrowIfInvalid()).validate(value); } + public ExponentialRetryPolicy getOauthTokenFetchRetryPolicy() { + return new ExponentialRetryPolicy(oauthTokenFetchRetryCount, + oauthTokenFetchRetryMinBackoff, oauthTokenFetchRetryMaxBackoff, + oauthTokenFetchRetryDeltaBackoff); + } + + public int getWriteMaxConcurrentRequestCount() { + if (this.writeMaxConcurrentRequestCount < 1) { + return 4 * Runtime.getRuntime().availableProcessors(); + } + return this.writeMaxConcurrentRequestCount; + } + + public int getMaxWriteRequestsToQueue() { + if (this.maxWriteRequestsToQueue < 1) { + return 2 * getWriteMaxConcurrentRequestCount(); + } + return this.maxWriteRequestsToQueue; + } + + public boolean enableAbfsListIterator() { + return this.enableAbfsListIterator; + } + @VisibleForTesting void setReadBufferSize(int bufferSize) { this.readBufferSize = bufferSize; @@ -716,6 +929,21 @@ void setListMaxResults(int listMaxResults) { this.listMaxResults = listMaxResults; } + @VisibleForTesting + public void setMaxIoRetries(int maxIoRetries) { + this.maxIoRetries = maxIoRetries; + } + + @VisibleForTesting + void setMaxBackoffIntervalMilliseconds(int maxBackoffInterval) { + this.maxBackoffInterval = maxBackoffInterval; + } + + @VisibleForTesting + void setIsNamespaceEnabledAccount(String isNamespaceEnabledAccount) { + this.isNamespaceEnabledAccount = isNamespaceEnabledAccount; + } + private String getTrimmedPasswordString(String key, String defaultValue) throws IOException { String value = getPasswordString(key); if (StringUtils.isBlank(value)) { @@ -731,4 +959,19 @@ private String appendSlashIfNeeded(String authority) { return 
authority; } + @VisibleForTesting + public void setReadSmallFilesCompletely(boolean readSmallFilesCompletely) { + this.readSmallFilesCompletely = readSmallFilesCompletely; + } + + @VisibleForTesting + public void setOptimizeFooterRead(boolean optimizeFooterRead) { + this.optimizeFooterRead = optimizeFooterRead; + } + + @VisibleForTesting + public void setEnableAbfsListIterator(boolean enableAbfsListIterator) { + this.enableAbfsListIterator = enableAbfsListIterator; + } + } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java new file mode 100644 index 0000000000000..357f53b611d3c --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java @@ -0,0 +1,286 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.net.URI; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricStringBuilder; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsTag; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableMetric; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.*; + +/** + * Instrumentation of Abfs counters. + */ +public class AbfsCountersImpl implements AbfsCounters { + + /** + * Single context for all the Abfs counters to separate them from other + * counters. + */ + private static final String CONTEXT = "AbfsContext"; + /** + * The name of a field added to metrics records that uniquely identifies a + * specific FileSystem instance. + */ + private static final String REGISTRY_ID = "AbfsID"; + /** + * The name of a field added to metrics records that indicates the hostname + * portion of the FS URL. 
+ */ + private static final String METRIC_BUCKET = "AbfsBucket"; + + private final MetricsRegistry registry = + new MetricsRegistry("abfsMetrics").setContext(CONTEXT); + + private static final AbfsStatistic[] STATISTIC_LIST = { + CALL_CREATE, + CALL_OPEN, + CALL_GET_FILE_STATUS, + CALL_APPEND, + CALL_CREATE_NON_RECURSIVE, + CALL_DELETE, + CALL_EXIST, + CALL_GET_DELEGATION_TOKEN, + CALL_LIST_STATUS, + CALL_MKDIRS, + CALL_RENAME, + DIRECTORIES_CREATED, + DIRECTORIES_DELETED, + FILES_CREATED, + FILES_DELETED, + ERROR_IGNORED, + CONNECTIONS_MADE, + SEND_REQUESTS, + GET_RESPONSES, + BYTES_SENT, + BYTES_RECEIVED, + READ_THROTTLES, + WRITE_THROTTLES + }; + + public AbfsCountersImpl(URI uri) { + UUID fileSystemInstanceId = UUID.randomUUID(); + registry.tag(REGISTRY_ID, + "A unique identifier for the instance", + fileSystemInstanceId.toString()); + registry.tag(METRIC_BUCKET, "Hostname from the FS URL", uri.getHost()); + + for (AbfsStatistic stats : STATISTIC_LIST) { + createCounter(stats); + } + } + + /** + * Look up a Metric from registered set. + * + * @param name name of metric. + * @return the metric or null. + */ + private MutableMetric lookupMetric(String name) { + return getRegistry().get(name); + } + + /** + * Look up counter by name. + * + * @param name name of counter. + * @return counter if found, else null. + */ + private MutableCounterLong lookupCounter(String name) { + MutableMetric metric = lookupMetric(name); + if (metric == null) { + return null; + } + if (!(metric instanceof MutableCounterLong)) { + throw new IllegalStateException("Metric " + name + + " is not a MutableCounterLong: " + metric); + } + return (MutableCounterLong) metric; + } + + /** + * Create a counter in the registry. + * + * @param stats AbfsStatistic whose counter needs to be made. + * @return counter or null. + */ + private MutableCounterLong createCounter(AbfsStatistic stats) { + return registry.newCounter(stats.getStatName(), + stats.getStatDescription(), 0L); + } + + /** + * {@inheritDoc} + * + * Increment a statistic with some value. + * + * @param statistic AbfsStatistic need to be incremented. + * @param value long value to be incremented by. + */ + @Override + public void incrementCounter(AbfsStatistic statistic, long value) { + MutableCounterLong counter = lookupCounter(statistic.getStatName()); + if (counter != null) { + counter.incr(value); + } + } + + /** + * Getter for MetricRegistry. + * + * @return MetricRegistry or null. + */ + private MetricsRegistry getRegistry() { + return registry; + } + + /** + * {@inheritDoc} + * + * Method to aggregate all the counters in the MetricRegistry and form a + * string with prefix, separator and suffix. + * + * @param prefix string that would be before metric. + * @param separator string that would be between metric name and value. + * @param suffix string that would be after metric value. + * @param all gets all the values even if unchanged. + * @return a String with all the metrics and their values. + */ + @Override + public String formString(String prefix, String separator, String suffix, + boolean all) { + + MetricStringBuilder metricStringBuilder = new MetricStringBuilder(null, + prefix, separator, suffix); + registry.snapshot(metricStringBuilder, all); + return metricStringBuilder.toString(); + } + + /** + * {@inheritDoc} + * + * Creating a map of all the counters for testing. + * + * @return a map of the metrics. 
+ */ + @VisibleForTesting + @Override + public Map toMap() { + MetricsToMap metricBuilder = new MetricsToMap(null); + registry.snapshot(metricBuilder, true); + return metricBuilder.getMap(); + } + + protected static class MetricsToMap extends MetricsRecordBuilder { + private final MetricsCollector parent; + private final Map map = + new HashMap<>(); + + MetricsToMap(MetricsCollector parent) { + this.parent = parent; + } + + @Override + public MetricsRecordBuilder tag(MetricsInfo info, String value) { + return this; + } + + @Override + public MetricsRecordBuilder add(MetricsTag tag) { + return this; + } + + @Override + public MetricsRecordBuilder add(AbstractMetric metric) { + return this; + } + + @Override + public MetricsRecordBuilder setContext(String value) { + return this; + } + + @Override + public MetricsRecordBuilder addCounter(MetricsInfo info, int value) { + return tuple(info, value); + } + + @Override + public MetricsRecordBuilder addCounter(MetricsInfo info, long value) { + return tuple(info, value); + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo info, int value) { + return tuple(info, value); + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo info, long value) { + return tuple(info, value); + } + + public MetricsToMap tuple(MetricsInfo info, long value) { + return tuple(info.name(), value); + } + + public MetricsToMap tuple(String name, long value) { + map.put(name, value); + return this; + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo info, float value) { + return tuple(info, (long) value); + } + + @Override + public MetricsRecordBuilder addGauge(MetricsInfo info, double value) { + return tuple(info, (long) value); + } + + @Override + public MetricsCollector parent() { + return parent; + } + + /** + * Get the map. + * + * @return the map of metrics. + */ + public Map getMap() { + return map; + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java new file mode 100644 index 0000000000000..2935cd754315d --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsStatistic.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames; + +/** + * Statistic which are collected in Abfs. + * Available as metrics in {@link AbfsCountersImpl}. 
+ */ +public enum AbfsStatistic { + + CALL_CREATE(CommonStatisticNames.OP_CREATE, + "Calls of create()."), + CALL_OPEN(CommonStatisticNames.OP_OPEN, + "Calls of open()."), + CALL_GET_FILE_STATUS(CommonStatisticNames.OP_GET_FILE_STATUS, + "Calls of getFileStatus()."), + CALL_APPEND(CommonStatisticNames.OP_APPEND, + "Calls of append()."), + CALL_CREATE_NON_RECURSIVE(CommonStatisticNames.OP_CREATE_NON_RECURSIVE, + "Calls of createNonRecursive()."), + CALL_DELETE(CommonStatisticNames.OP_DELETE, + "Calls of delete()."), + CALL_EXIST(CommonStatisticNames.OP_EXISTS, + "Calls of exist()."), + CALL_GET_DELEGATION_TOKEN(CommonStatisticNames.OP_GET_DELEGATION_TOKEN, + "Calls of getDelegationToken()."), + CALL_LIST_STATUS(CommonStatisticNames.OP_LIST_STATUS, + "Calls of listStatus()."), + CALL_MKDIRS(CommonStatisticNames.OP_MKDIRS, + "Calls of mkdirs()."), + CALL_RENAME(CommonStatisticNames.OP_RENAME, + "Calls of rename()."), + DIRECTORIES_CREATED("directories_created", + "Total number of directories created through the object store."), + DIRECTORIES_DELETED("directories_deleted", + "Total number of directories deleted through the object store."), + FILES_CREATED("files_created", + "Total number of files created through the object store."), + FILES_DELETED("files_deleted", + "Total number of files deleted from the object store."), + ERROR_IGNORED("error_ignored", + "Errors caught and ignored."), + + //Network statistics. + CONNECTIONS_MADE("connections_made", + "Total number of times a connection was made with the data store."), + SEND_REQUESTS("send_requests", + "Total number of times http requests were sent to the data store."), + GET_RESPONSES("get_responses", + "Total number of times a response was received."), + BYTES_SENT("bytes_sent", + "Total bytes uploaded."), + BYTES_RECEIVED("bytes_received", + "Total bytes received."), + READ_THROTTLES("read_throttles", + "Total number of times a read operation is throttled."), + WRITE_THROTTLES("write_throttles", + "Total number of times a write operation is throttled."); + + private String statName; + private String statDescription; + + /** + * Constructor of AbfsStatistic to set statistic name and description. + * + * @param statName Name of the statistic. + * @param statDescription Description of the statistic. + */ + AbfsStatistic(String statName, String statDescription) { + this.statName = statName; + this.statDescription = statDescription; + } + + /** + * Getter for statistic name. + * + * @return Name of statistic. + */ + public String getStatName() { + return statName; + } + + /** + * Getter for statistic description. + * + * @return Description of statistic. 
+ */ + public String getStatDescription() { + return statDescription; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java index 8eda2f3730400..ed607b38e6bfc 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java @@ -26,24 +26,31 @@ import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; +import java.nio.file.AccessDeniedException; import java.util.Hashtable; import java.util.List; import java.util.ArrayList; +import java.util.Collections; import java.util.EnumSet; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.azurebfs.services.AbfsClient; import org.apache.hadoop.fs.azurebfs.services.AbfsClientThrottlingIntercept; +import org.apache.hadoop.fs.azurebfs.services.AbfsListStatusRemoteIterator; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -68,6 +75,9 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.SASTokenProviderException; import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode; import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager; +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; +import org.apache.hadoop.fs.impl.AbstractFSBuilderImpl; +import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -76,8 +86,11 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.functional.RemoteIterators; +import org.apache.hadoop.util.LambdaUtils; import org.apache.hadoop.util.Progressable; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.*; import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; /** @@ -94,6 +107,7 @@ public class AzureBlobFileSystem extends FileSystem { private boolean delegationTokenEnabled = false; private AbfsDelegationTokenManager delegationTokenManager; + private AbfsCounters abfsCounters; @Override public void initialize(URI uri, Configuration configuration) @@ -105,11 +119,12 @@ public void initialize(URI uri, Configuration configuration) LOG.debug("Initializing AzureBlobFileSystem for {}", uri); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); - this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecureScheme(), configuration); + 
abfsCounters = new AbfsCountersImpl(uri); + this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecureScheme(), + configuration, abfsCounters); LOG.trace("AzureBlobFileSystemStore init complete"); final AbfsConfiguration abfsConfiguration = abfsStore.getAbfsConfiguration(); - this.setWorkingDirectory(this.getHomeDirectory()); if (abfsConfiguration.getCreateRemoteFileSystemDuringInitialization()) { @@ -146,6 +161,11 @@ public String toString() { sb.append("uri=").append(uri); sb.append(", user='").append(abfsStore.getUser()).append('\''); sb.append(", primaryUserGroup='").append(abfsStore.getPrimaryGroup()).append('\''); + if (abfsCounters != null) { + sb.append(", Statistics: {").append(abfsCounters.formString("{", "=", + "}", true)); + sb.append("}"); + } sb.append('}'); return sb.toString(); } @@ -162,11 +182,18 @@ public URI getUri() { @Override public FSDataInputStream open(final Path path, final int bufferSize) throws IOException { LOG.debug("AzureBlobFileSystem.open path: {} bufferSize: {}", path, bufferSize); + // bufferSize is unused. + return open(path, Optional.empty()); + } + private FSDataInputStream open(final Path path, + final Optional options) throws IOException { + statIncrement(CALL_OPEN); Path qualifiedPath = makeQualified(path); try { - InputStream inputStream = abfsStore.openFileForRead(qualifiedPath, statistics); + InputStream inputStream = abfsStore.openFileForRead(qualifiedPath, + options, statistics); return new FSDataInputStream(inputStream); } catch(AzureBlobFileSystemException ex) { checkException(path, ex); @@ -174,6 +201,19 @@ public FSDataInputStream open(final Path path, final int bufferSize) throws IOEx } } + @Override + protected CompletableFuture openFileWithOptions( + final Path path, final OpenFileParameters parameters) throws IOException { + LOG.debug("AzureBlobFileSystem.openFileWithOptions path: {}", path); + AbstractFSBuilderImpl.rejectUnknownMandatoryKeys( + parameters.getMandatoryKeys(), + Collections.emptySet(), + "for " + path); + return LambdaUtils.eval( + new CompletableFuture<>(), () -> + open(path, Optional.of(parameters.getOptions()))); + } + @Override public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, final Progressable progress) throws IOException { @@ -183,13 +223,15 @@ public FSDataOutputStream create(final Path f, final FsPermission permission, fi overwrite, blockSize); + statIncrement(CALL_CREATE); trailingPeriodCheck(f); Path qualifiedPath = makeQualified(f); try { - OutputStream outputStream = abfsStore.createFile(qualifiedPath, overwrite, + OutputStream outputStream = abfsStore.createFile(qualifiedPath, statistics, overwrite, permission == null ? 
FsPermission.getFileDefault() : permission, FsPermission.getUMask(getConf())); + statIncrement(FILES_CREATED); return new FSDataOutputStream(outputStream, statistics); } catch(AzureBlobFileSystemException ex) { checkException(f, ex); @@ -203,6 +245,7 @@ public FSDataOutputStream createNonRecursive(final Path f, final FsPermission pe final boolean overwrite, final int bufferSize, final short replication, final long blockSize, final Progressable progress) throws IOException { + statIncrement(CALL_CREATE_NON_RECURSIVE); final Path parent = f.getParent(); final FileStatus parentFileStatus = tryGetFileStatus(parent); @@ -246,11 +289,11 @@ public FSDataOutputStream append(final Path f, final int bufferSize, final Progr "AzureBlobFileSystem.append path: {} bufferSize: {}", f.toString(), bufferSize); - + statIncrement(CALL_APPEND); Path qualifiedPath = makeQualified(f); try { - OutputStream outputStream = abfsStore.openFileForWrite(qualifiedPath, false); + OutputStream outputStream = abfsStore.openFileForWrite(qualifiedPath, statistics, false); return new FSDataOutputStream(outputStream, statistics); } catch(AzureBlobFileSystemException ex) { checkException(f, ex); @@ -259,8 +302,8 @@ public FSDataOutputStream append(final Path f, final int bufferSize, final Progr } public boolean rename(final Path src, final Path dst) throws IOException { - LOG.debug( - "AzureBlobFileSystem.rename src: {} dst: {}", src.toString(), dst.toString()); + LOG.debug("AzureBlobFileSystem.rename src: {} dst: {}", src, dst); + statIncrement(CALL_RENAME); trailingPeriodCheck(dst); @@ -310,6 +353,7 @@ public boolean rename(final Path src, final Path dst) throws IOException { abfsStore.rename(qualifiedSrcPath, qualifiedDstPath); return true; } catch(AzureBlobFileSystemException ex) { + LOG.debug("Rename operation failed. ", ex); checkException( src, ex, @@ -328,7 +372,7 @@ public boolean rename(final Path src, final Path dst) throws IOException { public boolean delete(final Path f, final boolean recursive) throws IOException { LOG.debug( "AzureBlobFileSystem.delete path: {} recursive: {}", f.toString(), recursive); - + statIncrement(CALL_DELETE); Path qualifiedPath = makeQualified(f); if (f.isRoot()) { @@ -353,7 +397,7 @@ public boolean delete(final Path f, final boolean recursive) throws IOException public FileStatus[] listStatus(final Path f) throws IOException { LOG.debug( "AzureBlobFileSystem.listStatus path: {}", f.toString()); - + statIncrement(CALL_LIST_STATUS); Path qualifiedPath = makeQualified(f); try { @@ -365,6 +409,24 @@ public FileStatus[] listStatus(final Path f) throws IOException { } } + /** + * Increment of an Abfs statistic. + * + * @param statistic AbfsStatistic that needs increment. + */ + private void statIncrement(AbfsStatistic statistic) { + incrementStatistic(statistic); + } + + /** + * Method for incrementing AbfsStatistic by a long value. + * + * @param statistic the Statistic to be incremented. + */ + private void incrementStatistic(AbfsStatistic statistic) { + abfsCounters.incrementCounter(statistic, 1); + } + /** * Performs a check for (.) until root in the path to throw an exception. * The purpose is to differentiate between dir/dir1 and dir/dir1. 
@@ -394,7 +456,7 @@ private void trailingPeriodCheck(Path path) throws IllegalArgumentException { public boolean mkdirs(final Path f, final FsPermission permission) throws IOException { LOG.debug( "AzureBlobFileSystem.mkdirs path: {} permissions: {}", f, permission); - + statIncrement(CALL_MKDIRS); trailingPeriodCheck(f); final Path parentFolder = f.getParent(); @@ -408,6 +470,7 @@ public boolean mkdirs(final Path f, final FsPermission permission) throws IOExce try { abfsStore.createDirectory(qualifiedPath, permission == null ? FsPermission.getDirDefault() : permission, FsPermission.getUMask(getConf())); + statIncrement(DIRECTORIES_CREATED); return true; } catch (AzureBlobFileSystemException ex) { checkException(f, ex, AzureServiceErrorCode.PATH_ALREADY_EXISTS); @@ -425,12 +488,13 @@ public synchronized void close() throws IOException { LOG.debug("AzureBlobFileSystem.close"); IOUtils.cleanupWithLogger(LOG, abfsStore, delegationTokenManager); this.isClosed = true; + LOG.debug("Closing Abfs: " + toString()); } @Override public FileStatus getFileStatus(final Path f) throws IOException { LOG.debug("AzureBlobFileSystem.getFileStatus path: {}", f); - + statIncrement(CALL_GET_FILE_STATUS); Path qualifiedPath = makeQualified(f); try { @@ -567,6 +631,11 @@ private boolean deleteRoot() throws IOException { @Override public Void call() throws Exception { delete(fs.getPath(), fs.isDirectory()); + if (fs.isDirectory()) { + statIncrement(DIRECTORIES_DELETED); + } else { + statIncrement(FILES_DELETED); + } return null; } }); @@ -642,15 +711,17 @@ public void setXAttr(final Path path, final String name, final byte[] value, fin throw new IllegalArgumentException("A valid name and value must be specified."); } + Path qualifiedPath = makeQualified(path); + try { - Hashtable properties = abfsStore.getPathStatus(path); + Hashtable properties = abfsStore.getPathStatus(qualifiedPath); String xAttrName = ensureValidAttributeName(name); boolean xAttrExists = properties.containsKey(xAttrName); XAttrSetFlag.validate(name, xAttrExists, flag); String xAttrValue = abfsStore.decodeAttribute(value); properties.put(xAttrName, xAttrValue); - abfsStore.setPathProperties(path, properties); + abfsStore.setPathProperties(qualifiedPath, properties); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } @@ -675,9 +746,11 @@ public byte[] getXAttr(final Path path, final String name) throw new IllegalArgumentException("A valid name must be specified."); } + Path qualifiedPath = makeQualified(path); + byte[] value = null; try { - Hashtable properties = abfsStore.getPathStatus(path); + Hashtable properties = abfsStore.getPathStatus(qualifiedPath); String xAttrName = ensureValidAttributeName(name); if (properties.containsKey(xAttrName)) { String xAttrValue = properties.get(xAttrName); @@ -736,7 +809,7 @@ public void setPermission(final Path path, final FsPermission permission) @Override public void modifyAclEntries(final Path path, final List aclSpec) throws IOException { - LOG.debug("AzureBlobFileSystem.modifyAclEntries path: {}", path.toString()); + LOG.debug("AzureBlobFileSystem.modifyAclEntries path: {}", path); if (!getIsNamespaceEnabled()) { throw new UnsupportedOperationException( @@ -885,7 +958,7 @@ public void setAcl(final Path path, final List aclSpec) */ @Override public AclStatus getAclStatus(final Path path) throws IOException { - LOG.debug("AzureBlobFileSystem.getAclStatus path: {}", path.toString()); + LOG.debug("AzureBlobFileSystem.getAclStatus path: {}", path); if (!getIsNamespaceEnabled()) { throw 
new UnsupportedOperationException( @@ -926,11 +999,38 @@ public void access(final Path path, final FsAction mode) throws IOException { } } + /** + * Incrementing exists() calls from superclass for statistic collection. + * + * @param f source path. + * @return true if the path exists. + * @throws IOException + */ + @Override + public boolean exists(Path f) throws IOException { + statIncrement(CALL_EXIST); + return super.exists(f); + } + + @Override + public RemoteIterator listStatusIterator(Path path) + throws IOException { + LOG.debug("AzureBlobFileSystem.listStatusIterator path : {}", path); + if (abfsStore.getAbfsConfiguration().enableAbfsListIterator()) { + AbfsListStatusRemoteIterator abfsLsItr = + new AbfsListStatusRemoteIterator(getFileStatus(path), abfsStore); + return RemoteIterators.typeCastingRemoteIterator(abfsLsItr); + } else { + return super.listStatusIterator(path); + } + } + private FileStatus tryGetFileStatus(final Path f) { try { return getFileStatus(f); } catch (IOException ex) { LOG.debug("File not found {}", f); + statIncrement(ERROR_IGNORED); return null; } } @@ -947,6 +1047,7 @@ private boolean fileSystemExists() throws IOException { // there is not way to get the storage error code // workaround here is to check its status code. } catch (FileNotFoundException e) { + statIncrement(ERROR_IGNORED); return false; } } @@ -1058,7 +1159,8 @@ private void checkCheckAccessException(final Path path, * @param allowedErrorCodesList varargs list of error codes. * @throws IOException if the exception error code is not on the allowed list. */ - private void checkException(final Path path, + @VisibleForTesting + static void checkException(final Path path, final AzureBlobFileSystemException exception, final AzureServiceErrorCode... allowedErrorCodesList) throws IOException { if (exception instanceof AbfsRestOperationException) { @@ -1067,16 +1169,21 @@ private void checkException(final Path path, if (ArrayUtils.contains(allowedErrorCodesList, ere.getErrorCode())) { return; } - int statusCode = ere.getStatusCode(); - //AbfsRestOperationException.getMessage() contains full error info including path/uri. - if (statusCode == HttpURLConnection.HTTP_NOT_FOUND) { - throw (IOException) new FileNotFoundException(ere.getMessage()) + String message = ere.getMessage(); + + switch (ere.getStatusCode()) { + case HttpURLConnection.HTTP_NOT_FOUND: + throw (IOException) new FileNotFoundException(message) + .initCause(exception); + case HttpURLConnection.HTTP_CONFLICT: + throw (IOException) new FileAlreadyExistsException(message) .initCause(exception); - } else if (statusCode == HttpURLConnection.HTTP_CONFLICT) { - throw (IOException) new FileAlreadyExistsException(ere.getMessage()) + case HttpURLConnection.HTTP_FORBIDDEN: + case HttpURLConnection.HTTP_UNAUTHORIZED: + throw (IOException) new AccessDeniedException(message) .initCause(exception); - } else { + default: throw ere; } } else if (exception instanceof SASTokenProviderException) { @@ -1120,6 +1227,7 @@ private Throwable getRootCause(Throwable throwable) { */ @Override public synchronized Token getDelegationToken(final String renewer) throws IOException { + statIncrement(CALL_GET_DELEGATION_TOKEN); return this.delegationTokenEnabled ? 
this.delegationTokenManager.getDelegationToken(renewer) : super.getDelegationToken(renewer); } @@ -1182,6 +1290,11 @@ boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { return abfsStore.getIsNamespaceEnabled(); } + @VisibleForTesting + Map getInstrumentationMap() { + return abfsCounters.toMap(); + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java index bff0e455cf00c..678f0b4f20953 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.IOException; import java.io.OutputStream; +import java.lang.reflect.InvocationTargetException; import java.io.UnsupportedEncodingException; import java.net.HttpURLConnection; import java.net.MalformedURLException; @@ -34,7 +35,6 @@ import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; import java.nio.charset.StandardCharsets; -import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; import java.util.ArrayList; @@ -46,11 +46,14 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -60,9 +63,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; +import org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.ConcurrentWriteOperationDetectedException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.FileSystemOperationUnhandledException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidAbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidFileSystemPropertyException; @@ -71,15 +76,26 @@ import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode; import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema; import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException; +import org.apache.hadoop.fs.azurebfs.enums.Trilean; import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider; import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper; import 
org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; +import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator; import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer; +import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformerInterface; import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper; import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsClientContext; +import org.apache.hadoop.fs.azurebfs.services.AbfsClientContextBuilder; +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamContext; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl; import org.apache.hadoop.fs.azurebfs.services.AbfsPermission; import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; import org.apache.hadoop.fs.azurebfs.services.AuthType; @@ -87,8 +103,10 @@ import org.apache.hadoop.fs.azurebfs.services.SharedKeyCredentials; import org.apache.hadoop.fs.azurebfs.services.AbfsPerfTracker; import org.apache.hadoop.fs.azurebfs.services.AbfsPerfInfo; +import org.apache.hadoop.fs.azurebfs.services.ListingSupport; import org.apache.hadoop.fs.azurebfs.utils.Base64; import org.apache.hadoop.fs.azurebfs.utils.CRC64; +import org.apache.hadoop.fs.azurebfs.utils.DateTimeUtils; import org.apache.hadoop.fs.azurebfs.utils.UriUtils; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; @@ -97,8 +115,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.http.client.utils.URIBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.CHAR_EQUALS; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.CHAR_FORWARD_SLASH; @@ -110,35 +126,42 @@ import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.SINGLE_WHITE_SPACE; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.TOKEN_VERSION; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_ABFS_ENDPOINT; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_IDENTITY_TRANSFORM_CLASS; /** * Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage. 
*/ @InterfaceAudience.Public @InterfaceStability.Evolving -public class AzureBlobFileSystemStore implements Closeable { +public class AzureBlobFileSystemStore implements Closeable, ListingSupport { private static final Logger LOG = LoggerFactory.getLogger(AzureBlobFileSystemStore.class); private AbfsClient client; private URI uri; private String userName; private String primaryUserGroup; - private static final String DATE_TIME_PATTERN = "E, dd MMM yyyy HH:mm:ss z"; private static final String TOKEN_DATE_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"; private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1"; private static final int GET_SET_AGGREGATE_COUNT = 2; private final AbfsConfiguration abfsConfiguration; private final Set azureAtomicRenameDirSet; - private boolean isNamespaceEnabledSet; - private boolean isNamespaceEnabled; + private Trilean isNamespaceEnabled; private final AuthType authType; private final UserGroupInformation userGroupInformation; - private final IdentityTransformer identityTransformer; + private final IdentityTransformerInterface identityTransformer; private final AbfsPerfTracker abfsPerfTracker; + private final AbfsCounters abfsCounters; - public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration) - throws IOException { + /** + * The set of directories where we should store files as append blobs. + */ + private Set appendBlobDirSet; + + public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, + Configuration configuration, + AbfsCounters abfsCounters) throws IOException { this.uri = uri; String[] authorityParts = authorityParts(uri); final String fileSystemName = authorityParts[0]; @@ -152,6 +175,8 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration c LOG.trace("AbfsConfiguration init complete"); + this.isNamespaceEnabled = abfsConfiguration.getIsNamespaceEnabledAccount(); + this.userGroupInformation = UserGroupInformation.getCurrentUser(); this.userName = userGroupInformation.getShortUserName(); LOG.trace("UGI init complete"); @@ -174,9 +199,35 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration c boolean usingOauth = (authType == AuthType.OAuth); boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? 
true : isSecureScheme; this.abfsPerfTracker = new AbfsPerfTracker(fileSystemName, accountName, this.abfsConfiguration); + this.abfsCounters = abfsCounters; initializeClient(uri, fileSystemName, accountName, useHttps); - this.identityTransformer = new IdentityTransformer(abfsConfiguration.getRawConfiguration()); + final Class identityTransformerClass = + configuration.getClass(FS_AZURE_IDENTITY_TRANSFORM_CLASS, IdentityTransformer.class, + IdentityTransformerInterface.class); + try { + this.identityTransformer = + identityTransformerClass.getConstructor(Configuration.class).newInstance(configuration); + } catch (IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException e) { + throw new IOException(e); + } LOG.trace("IdentityTransformer init complete"); + + // Extract the directories that should contain append blobs + String appendBlobDirs = abfsConfiguration.getAppendBlobDirs(); + if (appendBlobDirs.trim().isEmpty()) { + this.appendBlobDirSet = new HashSet(); + } else { + this.appendBlobDirSet = new HashSet<>(Arrays.asList( + abfsConfiguration.getAppendBlobDirs().split(AbfsHttpConstants.COMMA))); + } + } + + /** + * Checks if the given key in Azure Storage should be stored as an append + * blob instead of a block blob. + */ + public boolean isAppendBlobKey(String key) { + return isKeyForDirectorySet(key, appendBlobDirSet); } /** @@ -231,26 +282,32 @@ private String[] authorityParts(URI uri) throws InvalidUriAuthorityException, In } public boolean getIsNamespaceEnabled() throws AzureBlobFileSystemException { - if (!isNamespaceEnabledSet) { + try { + return this.isNamespaceEnabled.toBoolean(); + } catch (TrileanConversionException e) { + LOG.debug("isNamespaceEnabled is UNKNOWN; fall back and determine through" + + " getAcl server call", e); + } - LOG.debug("Get root ACL status"); - try (AbfsPerfInfo perfInfo = startTracking("getIsNamespaceEnabled", "getAclStatus")) { - AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); - perfInfo.registerResult(op.getResult()); - isNamespaceEnabled = true; - perfInfo.registerSuccess(true); - } catch (AbfsRestOperationException ex) { - // Get ACL status is a HEAD request, its response doesn't contain errorCode - // So can only rely on its status code to determine its account type. - if (HttpURLConnection.HTTP_BAD_REQUEST != ex.getStatusCode()) { - throw ex; - } - isNamespaceEnabled = false; + LOG.debug("Get root ACL status"); + try (AbfsPerfInfo perfInfo = startTracking("getIsNamespaceEnabled", + "getAclStatus")) { + AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.ROOT_PATH); + perfInfo.registerResult(op.getResult()); + isNamespaceEnabled = Trilean.getTrilean(true); + perfInfo.registerSuccess(true); + } catch (AbfsRestOperationException ex) { + // Get ACL status is a HEAD request, its response doesn't contain + // errorCode + // So can only rely on its status code to determine its account type.
+ if (HttpURLConnection.HTTP_BAD_REQUEST != ex.getStatusCode()) { + throw ex; } - isNamespaceEnabledSet = true; + + isNamespaceEnabled = Trilean.getTrilean(false); } - return isNamespaceEnabled; + return isNamespaceEnabled.toBoolean(); } @VisibleForTesting @@ -340,7 +397,7 @@ public Hashtable getPathStatus(final Path path) throws AzureBlob path); final Hashtable parsedXmsProperties; - final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + final AbfsRestOperation op = client.getPathStatus(getRelativePath(path), true); perfInfo.registerResult(op.getResult()); final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES); @@ -366,7 +423,7 @@ public void setPathProperties(final Path path, final Hashtable p } catch (CharacterCodingException ex) { throw new InvalidAbfsRestOperationException(ex); } - final AbfsRestOperation op = client.setPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), commaSeparatedProperties); + final AbfsRestOperation op = client.setPathProperties(getRelativePath(path), commaSeparatedProperties); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } @@ -391,31 +448,146 @@ public void deleteFilesystem() throws AzureBlobFileSystemException { } } - public OutputStream createFile(final Path path, final boolean overwrite, final FsPermission permission, - final FsPermission umask) throws AzureBlobFileSystemException { + public OutputStream createFile(final Path path, + final FileSystem.Statistics statistics, + final boolean overwrite, final FsPermission permission, + final FsPermission umask) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("createFile", "createPath")) { boolean isNamespaceEnabled = getIsNamespaceEnabled(); LOG.debug("createFile filesystem: {} path: {} overwrite: {} permission: {} umask: {} isNamespaceEnabled: {}", client.getFileSystem(), path, overwrite, - permission.toString(), - umask.toString(), + permission, + umask, isNamespaceEnabled); - final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), true, overwrite, - isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null); + String relativePath = getRelativePath(path); + boolean isAppendBlob = false; + if (isAppendBlobKey(path.toString())) { + isAppendBlob = true; + } + + // if "fs.azure.enable.conditional.create.overwrite" is enabled and + // is a create request with overwrite=true, create will follow different + // flow. + boolean triggerConditionalCreateOverwrite = false; + if (overwrite + && abfsConfiguration.isConditionalCreateOverwriteEnabled()) { + triggerConditionalCreateOverwrite = true; + } + + AbfsRestOperation op; + if (triggerConditionalCreateOverwrite) { + op = conditionalCreateOverwriteFile(relativePath, + statistics, + isNamespaceEnabled ? getOctalNotation(permission) : null, + isNamespaceEnabled ? getOctalNotation(umask) : null, + isAppendBlob + ); + + } else { + op = client.createPath(relativePath, true, + overwrite, + isNamespaceEnabled ? getOctalNotation(permission) : null, + isNamespaceEnabled ? 
getOctalNotation(umask) : null, + isAppendBlob, + null); } perfInfo.registerResult(op.getResult()).registerSuccess(true); return new AbfsOutputStream( + client, + statistics, + relativePath, + 0, + populateAbfsOutputStreamContext(isAppendBlob)); + } + } + + /** + * Conditional create overwrite flow ensures that a create with overwrite=true + * succeeds only if the eTag of the pre-existing file matches. + * @param relativePath relative path of the file to be created + * @param statistics FileSystem statistics to update + * @param permission octal permission to set on the file + * @param umask octal umask to apply + * @param isAppendBlob true if the file is to be created as an append blob + * @return the AbfsRestOperation of the successful create + * @throws AzureBlobFileSystemException + */ + private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePath, + final FileSystem.Statistics statistics, + final String permission, + final String umask, + final boolean isAppendBlob) throws AzureBlobFileSystemException { + AbfsRestOperation op; + + try { + // Trigger a create with overwrite=false first so that eTag fetch can be + // avoided for cases when no pre-existing file is present (major portion + // of create file traffic falls into the case of no pre-existing file). + op = client.createPath(relativePath, true, + false, permission, umask, isAppendBlob, null); + } catch (AbfsRestOperationException e) { + if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT) { + // File pre-exists, fetch eTag + try { + op = client.getPathStatus(relativePath, false); + } catch (AbfsRestOperationException ex) { + if (ex.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { + // Is a parallel access case, as file which was found to be + // present went missing by this request. + throw new ConcurrentWriteOperationDetectedException( + "Parallel access to the create path detected. Failing request " + + "to honor single writer semantics"); + } else { + throw ex; + } + } + + String eTag = op.getResult() + .getResponseHeader(HttpHeaderConfigurations.ETAG); + + try { + // overwrite only if eTag matches with the file properties fetched before + op = client.createPath(relativePath, true, + true, permission, umask, isAppendBlob, eTag); + } catch (AbfsRestOperationException ex) { + if (ex.getStatusCode() == HttpURLConnection.HTTP_PRECON_FAILED) { + // Is a parallel access case, as file with eTag was just queried + // and precondition failure can happen only when another file with + // different etag got created. + throw new ConcurrentWriteOperationDetectedException( + "Parallel access to the create path detected."
Failing request " + + "to honor single writer semantics"); + } else { + throw ex; + } + } + } else { + throw e; + } + } + + return op; + } + + private AbfsOutputStreamContext populateAbfsOutputStreamContext(boolean isAppendBlob) { + int bufferSize = abfsConfiguration.getWriteBufferSize(); + if (isAppendBlob && bufferSize > FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE) { + bufferSize = FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE; } + return new AbfsOutputStreamContext(abfsConfiguration.getSasTokenRenewPeriodForStreamsInSeconds()) + .withWriteBufferSize(bufferSize) + .enableFlush(abfsConfiguration.isFlushEnabled()) + .enableSmallWriteOptimization(abfsConfiguration.isSmallWriteOptimizationEnabled()) + .disableOutputStreamFlush(abfsConfiguration.isOutputStreamFlushDisabled()) + .withStreamStatistics(new AbfsOutputStreamStatisticsImpl()) + .withAppendBlob(isAppendBlob) + .withWriteMaxConcurrentRequestCount(abfsConfiguration.getWriteMaxConcurrentRequestCount()) + .withMaxWriteRequestsToQueue(abfsConfiguration.getMaxWriteRequestsToQueue()) + .build(); } public void createDirectory(final Path path, final FsPermission permission, final FsPermission umask) @@ -429,21 +601,31 @@ public void createDirectory(final Path path, final FsPermission permission, fina umask, isNamespaceEnabled); - final AbfsRestOperation op = client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), false, true, + final AbfsRestOperation op = client.createPath(getRelativePath(path), false, true, isNamespaceEnabled ? getOctalNotation(permission) : null, - isNamespaceEnabled ? getOctalNotation(umask) : null); + isNamespaceEnabled ? getOctalNotation(umask) : null, false, null); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } - public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statistics statistics) + public AbfsInputStream openFileForRead(final Path path, + final FileSystem.Statistics statistics) + throws AzureBlobFileSystemException { + return openFileForRead(path, Optional.empty(), statistics); + } + + public AbfsInputStream openFileForRead(final Path path, + final Optional options, + final FileSystem.Statistics statistics) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("openFileForRead", "getPathStatus")) { LOG.debug("openFileForRead filesystem: {} path: {}", client.getFileSystem(), path); - final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + String relativePath = getRelativePath(path); + + final AbfsRestOperation op = client.getPathStatus(relativePath, false); perfInfo.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); @@ -462,13 +644,32 @@ public AbfsInputStream openFileForRead(final Path path, final FileSystem.Statist // Add statistics for InputStream return new AbfsInputStream(client, statistics, - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), contentLength, - abfsConfiguration.getReadBufferSize(), abfsConfiguration.getReadAheadQueueDepth(), - abfsConfiguration.getTolerateOobAppends(), eTag); + relativePath, contentLength, + populateAbfsInputStreamContext(options), + eTag); } } - public OutputStream openFileForWrite(final Path path, final boolean overwrite) throws + private AbfsInputStreamContext populateAbfsInputStreamContext( + Optional options) { + boolean bufferedPreadDisabled = options + .map(c -> c.getBoolean(FS_AZURE_BUFFERED_PREAD_DISABLE, 
false)) + .orElse(false); + return new AbfsInputStreamContext(abfsConfiguration.getSasTokenRenewPeriodForStreamsInSeconds()) + .withReadBufferSize(abfsConfiguration.getReadBufferSize()) + .withReadAheadQueueDepth(abfsConfiguration.getReadAheadQueueDepth()) + .withTolerateOobAppends(abfsConfiguration.getTolerateOobAppends()) + .withReadSmallFilesCompletely(abfsConfiguration.readSmallFilesCompletely()) + .withOptimizeFooterRead(abfsConfiguration.optimizeFooterRead()) + .withStreamStatistics(new AbfsInputStreamStatisticsImpl()) + .withShouldReadBufferSizeAlways( + abfsConfiguration.shouldReadBufferSizeAlways()) + .withReadAheadBlockSize(abfsConfiguration.getReadAheadBlockSize()) + .withBufferedPreadDisabled(bufferedPreadDisabled) + .build(); + } + + public OutputStream openFileForWrite(final Path path, final FileSystem.Statistics statistics, final boolean overwrite) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfo = startTracking("openFileForWrite", "getPathStatus")) { LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}", @@ -476,7 +677,9 @@ public OutputStream openFileForWrite(final Path path, final boolean overwrite) t path, overwrite); - final AbfsRestOperation op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + String relativePath = getRelativePath(path); + + final AbfsRestOperation op = client.getPathStatus(relativePath, false); perfInfo.registerResult(op.getResult()); final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE); @@ -494,13 +697,17 @@ public OutputStream openFileForWrite(final Path path, final boolean overwrite) t perfInfo.registerSuccess(true); + boolean isAppendBlob = false; + if (isAppendBlobKey(path.toString())) { + isAppendBlob = true; + } + return new AbfsOutputStream( - client, - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), - offset, - abfsConfiguration.getWriteBufferSize(), - abfsConfiguration.isFlushEnabled(), - abfsConfiguration.isOutputStreamFlushDisabled()); + client, + statistics, + relativePath, + offset, + populateAbfsOutputStreamContext(isAppendBlob)); } } @@ -522,10 +729,13 @@ public void rename(final Path source, final Path destination) throws String continuation = null; + String sourceRelativePath = getRelativePath(source); + String destinationRelativePath = getRelativePath(destination); + do { try (AbfsPerfInfo perfInfo = startTracking("rename", "renamePath")) { - AbfsRestOperation op = client.renamePath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(source), - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(destination), continuation); + AbfsRestOperation op = client.renamePath(sourceRelativePath, + destinationRelativePath, continuation); perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); perfInfo.registerSuccess(true); @@ -552,10 +762,12 @@ public void delete(final Path path, final boolean recursive) String continuation = null; + String relativePath = getRelativePath(path); + do { try (AbfsPerfInfo perfInfo = startTracking("delete", "deletePath")) { AbfsRestOperation op = client.deletePath( - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation); + relativePath, recursive, continuation); perfInfo.registerResult(op.getResult()); continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION); perfInfo.registerSuccess(true); @@ -581,14 +793,14 @@ public FileStatus getFileStatus(final Path 
path) throws IOException { if (path.isRoot()) { if (isNamespaceEnabled) { perfInfo.registerCallee("getAclStatus"); - op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + AbfsHttpConstants.ROOT_PATH); + op = client.getAclStatus(getRelativePath(path)); } else { perfInfo.registerCallee("getFilesystemProperties"); op = client.getFilesystemProperties(); } } else { perfInfo.registerCallee("getPathStatus"); - op = client.getPathStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path)); + op = client.getPathStatus(getRelativePath(path), false); } perfInfo.registerResult(op.getResult()); @@ -632,7 +844,7 @@ public FileStatus getFileStatus(final Path path) throws IOException { resourceIsDir, 1, blockSize, - parseLastModifiedTime(lastModified), + DateTimeUtils.parseLastModifiedTime(lastModified), path, eTag); } @@ -642,6 +854,7 @@ public FileStatus getFileStatus(final Path path) throws IOException { * @param path The list path. * @return the entries in the path. * */ + @Override public FileStatus[] listStatus(final Path path) throws IOException { return listStatus(path, null); } @@ -658,7 +871,17 @@ public FileStatus[] listStatus(final Path path) throws IOException { * @return the entries in the path start from "startFrom" in lexical order. * */ @InterfaceStability.Unstable + @Override public FileStatus[] listStatus(final Path path, final String startFrom) throws IOException { + List fileStatuses = new ArrayList<>(); + listStatus(path, startFrom, fileStatuses, true, null); + return fileStatuses.toArray(new FileStatus[fileStatuses.size()]); + } + + @Override + public String listStatus(final Path path, final String startFrom, + List fileStatuses, final boolean fetchAll, + String continuation) throws IOException { final Instant startAggregate = abfsPerfTracker.getLatencyInstant(); long countAggregate = 0; boolean shouldContinue = true; @@ -668,17 +891,17 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I path, startFrom); - final String relativePath = path.isRoot() ? AbfsHttpConstants.EMPTY_STRING : getRelativePath(path); - String continuation = null; + final String relativePath = getRelativePath(path); - // generate continuation token if a valid startFrom is provided. - if (startFrom != null && !startFrom.isEmpty()) { - continuation = getIsNamespaceEnabled() - ? generateContinuationTokenForXns(startFrom) - : generateContinuationTokenForNonXns(path.isRoot() ? ROOT_PATH : relativePath, startFrom); + if (continuation == null || continuation.isEmpty()) { + // generate continuation token if a valid startFrom is provided. + if (startFrom != null && !startFrom.isEmpty()) { + continuation = getIsNamespaceEnabled() + ? generateContinuationTokenForXns(startFrom) + : generateContinuationTokenForNonXns(relativePath, startFrom); + } } - ArrayList fileStatuses = new ArrayList<>(); do { try (AbfsPerfInfo perfInfo = startTracking("listStatus", "listPath")) { AbfsRestOperation op = client.listPath(relativePath, false, @@ -708,7 +931,8 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I long contentLength = entry.contentLength() == null ? 0 : entry.contentLength(); boolean isDirectory = entry.isDirectory() == null ? 
false : entry.isDirectory(); if (entry.lastModified() != null && !entry.lastModified().isEmpty()) { - lastModifiedMillis = parseLastModifiedTime(entry.lastModified()); + lastModifiedMillis = DateTimeUtils.parseLastModifiedTime( + entry.lastModified()); } Path entryPath = new Path(File.separator + entry.name()); @@ -731,7 +955,8 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I perfInfo.registerSuccess(true); countAggregate++; - shouldContinue = continuation != null && !continuation.isEmpty(); + shouldContinue = + fetchAll && continuation != null && !continuation.isEmpty(); if (!shouldContinue) { perfInfo.registerAggregates(startAggregate, countAggregate); @@ -739,7 +964,7 @@ public FileStatus[] listStatus(final Path path, final String startFrom) throws I } } while (shouldContinue); - return fileStatuses.toArray(new FileStatus[fileStatuses.size()]); + return continuation; } // generate continuation token for xns account @@ -763,12 +988,13 @@ private String generateContinuationTokenForXns(final String firstEntryName) { } // generate continuation token for non-xns account - private String generateContinuationTokenForNonXns(final String path, final String firstEntryName) { + private String generateContinuationTokenForNonXns(String path, final String firstEntryName) { Preconditions.checkArgument(!Strings.isNullOrEmpty(firstEntryName) && !firstEntryName.startsWith(AbfsHttpConstants.ROOT_PATH), "startFrom must be a dir/file name and it can not be a full path"); // Notice: non-xns continuation token requires full path (first "/" is not included) for startFrom + path = AbfsClient.getDirectoryQueryParameter(path); final String startFrom = (path.isEmpty() || path.equals(ROOT_PATH)) ? firstEntryName : path + ROOT_PATH + firstEntryName; @@ -809,15 +1035,14 @@ public void setOwner(final Path path, final String owner, final String group) th LOG.debug( "setOwner filesystem: {} path: {} owner: {} group: {}", client.getFileSystem(), - path.toString(), + path, owner, group); final String transformedOwner = identityTransformer.transformUserOrGroupForSetRequest(owner); final String transformedGroup = identityTransformer.transformUserOrGroupForSetRequest(group); - final AbfsRestOperation op = client.setOwner( - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation op = client.setOwner(getRelativePath(path), transformedOwner, transformedGroup); @@ -837,11 +1062,10 @@ public void setPermission(final Path path, final FsPermission permission) throws LOG.debug( "setPermission filesystem: {} path: {} permission: {}", client.getFileSystem(), - path.toString(), - permission.toString()); + path, + permission); - final AbfsRestOperation op = client.setPermission( - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + final AbfsRestOperation op = client.setPermission(getRelativePath(path), String.format(AbfsHttpConstants.PERMISSION_FORMAT, permission.toOctal())); perfInfo.registerResult(op.getResult()).registerSuccess(true); @@ -860,14 +1084,16 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro LOG.debug( "modifyAclEntries filesystem: {} path: {} aclSpec: {}", client.getFileSystem(), - path.toString(), + path, AclEntry.aclSpecToString(aclSpec)); identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map modifyAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); boolean useUpn = AbfsAclHelper.isUpnFormatAclEntries(modifyAclEntries); - final AbfsRestOperation op = 
client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), useUpn); + String relativePath = getRelativePath(path); + + final AbfsRestOperation op = client.getAclStatus(relativePath, useUpn); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -879,7 +1105,7 @@ public void modifyAclEntries(final Path path, final List aclSpec) thro try (AbfsPerfInfo perfInfoSet = startTracking("modifyAclEntries", "setAcl")) { final AbfsRestOperation setAclOp - = client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + = client.setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), eTag); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) @@ -899,14 +1125,16 @@ public void removeAclEntries(final Path path, final List aclSpec) thro LOG.debug( "removeAclEntries filesystem: {} path: {} aclSpec: {}", client.getFileSystem(), - path.toString(), + path, AclEntry.aclSpecToString(aclSpec)); identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map removeAclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(removeAclEntries); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); + String relativePath = getRelativePath(path); + + final AbfsRestOperation op = client.getAclStatus(relativePath, isUpnFormat); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -918,7 +1146,7 @@ public void removeAclEntries(final Path path, final List aclSpec) thro try (AbfsPerfInfo perfInfoSet = startTracking("removeAclEntries", "setAcl")) { final AbfsRestOperation setAclOp = - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + client.setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), eTag); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) @@ -938,9 +1166,11 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio LOG.debug( "removeDefaultAcl filesystem: {} path: {}", client.getFileSystem(), - path.toString()); + path); + + String relativePath = getRelativePath(path); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); + final AbfsRestOperation op = client.getAclStatus(relativePath); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_ACL)); @@ -958,7 +1188,7 @@ public void removeDefaultAcl(final Path path) throws AzureBlobFileSystemExceptio try (AbfsPerfInfo perfInfoSet = startTracking("removeDefaultAcl", "setAcl")) { final AbfsRestOperation setAclOp = - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + client.setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), eTag); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) @@ -978,9 +1208,11 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { LOG.debug( "removeAcl filesystem: {} path: {}", client.getFileSystem(), - path.toString()); + path); + + String relativePath = getRelativePath(path); - final AbfsRestOperation op = 
client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); + final AbfsRestOperation op = client.getAclStatus(relativePath); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -995,7 +1227,7 @@ public void removeAcl(final Path path) throws AzureBlobFileSystemException { try (AbfsPerfInfo perfInfoSet = startTracking("removeAcl", "setAcl")) { final AbfsRestOperation setAclOp = - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + client.setAcl(relativePath, AbfsAclHelper.serializeAclSpec(newAclEntries), eTag); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) @@ -1015,14 +1247,16 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl LOG.debug( "setAcl filesystem: {} path: {} aclspec: {}", client.getFileSystem(), - path.toString(), + path, AclEntry.aclSpecToString(aclSpec)); identityTransformer.transformAclEntriesForSetRequest(aclSpec); final Map aclEntries = AbfsAclHelper.deserializeAclSpec(AclEntry.aclSpecToString(aclSpec)); final boolean isUpnFormat = AbfsAclHelper.isUpnFormatAclEntries(aclEntries); - final AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), isUpnFormat); + String relativePath = getRelativePath(path); + + final AbfsRestOperation op = client.getAclStatus(relativePath, isUpnFormat); perfInfoGet.registerResult(op.getResult()); final String eTag = op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG); @@ -1034,7 +1268,7 @@ public void setAcl(final Path path, final List aclSpec) throws AzureBl try (AbfsPerfInfo perfInfoSet = startTracking("setAcl", "setAcl")) { final AbfsRestOperation setAclOp = - client.setAcl(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true), + client.setAcl(relativePath, AbfsAclHelper.serializeAclSpec(aclEntries), eTag); perfInfoSet.registerResult(setAclOp.getResult()) .registerSuccess(true) @@ -1054,9 +1288,9 @@ public AclStatus getAclStatus(final Path path) throws IOException { LOG.debug( "getAclStatus filesystem: {} path: {}", client.getFileSystem(), - path.toString()); + path); - AbfsRestOperation op = client.getAclStatus(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true)); + AbfsRestOperation op = client.getAclStatus(getRelativePath(path)); AbfsHttpOperation result = op.getResult(); perfInfo.registerResult(result); @@ -1100,10 +1334,8 @@ public void access(final Path path, final FsAction mode) return; } try (AbfsPerfInfo perfInfo = startTracking("access", "checkAccess")) { - String relativePath = - AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path, true); final AbfsRestOperation op = this.client - .checkAccess(relativePath, mode.SYMBOL); + .checkAccess(getRelativePath(path), mode.SYMBOL); perfInfo.registerResult(op.getResult()).registerSuccess(true); } } @@ -1112,7 +1344,19 @@ public boolean isAtomicRenameKey(String key) { return isKeyForDirectorySet(key, azureAtomicRenameDirSet); } - private void initializeClient(URI uri, String fileSystemName, String accountName, boolean isSecure) + /** + * A on-off operation to initialize AbfsClient for AzureBlobFileSystem + * Operations. + * + * @param uri Uniform resource identifier for Abfs. + * @param fileSystemName Name of the fileSystem being used. + * @param accountName Name of the account being used to access Azure + * data store. + * @param isSecure Tells if https is being used or http. 
+ * @throws IOException + */ + private void initializeClient(URI uri, String fileSystemName, + String accountName, boolean isSecure) throws IOException { if (this.client != null) { return; @@ -1133,6 +1377,10 @@ private void initializeClient(URI uri, String fileSystemName, String accountName AccessTokenProvider tokenProvider = null; SASTokenProvider sasTokenProvider = null; + if (authType == AuthType.OAuth) { + AzureADAuthenticator.init(abfsConfiguration); + } + if (authType == AuthType.SharedKey) { LOG.trace("Fetching SharedKey credentials"); int dotIndex = accountName.indexOf(AbfsHttpConstants.DOT); @@ -1155,38 +1403,38 @@ private void initializeClient(URI uri, String fileSystemName, String accountName LOG.trace("Initializing AbfsClient for {}", baseUrl); if (tokenProvider != null) { this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, - new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries()), - tokenProvider, abfsPerfTracker); + tokenProvider, + populateAbfsClientContext()); } else { this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, - new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries()), - sasTokenProvider, abfsPerfTracker); + sasTokenProvider, + populateAbfsClientContext()); } LOG.trace("AbfsClient init complete"); } + /** + * Populate a new AbfsClientContext instance with the desired properties. + * + * @return an instance of AbfsClientContext. + */ + private AbfsClientContext populateAbfsClientContext() { + return new AbfsClientContextBuilder() + .withExponentialRetryPolicy( + new ExponentialRetryPolicy(abfsConfiguration.getMaxIoRetries())) + .withAbfsCounters(abfsCounters) + .withAbfsPerfTracker(abfsPerfTracker) + .build(); + } + private String getOctalNotation(FsPermission fsPermission) { Preconditions.checkNotNull(fsPermission, "fsPermission"); return String.format(AbfsHttpConstants.PERMISSION_FORMAT, fsPermission.toOctal()); } private String getRelativePath(final Path path) { - return getRelativePath(path, false); - } - - private String getRelativePath(final Path path, final boolean allowRootPath) { Preconditions.checkNotNull(path, "path"); - final String relativePath = path.toUri().getPath(); - - if (relativePath.length() == 0 || (relativePath.length() == 1 && relativePath.charAt(0) == Path.SEPARATOR_CHAR)) { - return allowRootPath ? 
AbfsHttpConstants.ROOT_PATH : AbfsHttpConstants.EMPTY_STRING; - } - - if (relativePath.charAt(0) == Path.SEPARATOR_CHAR) { - return relativePath.substring(1); - } - - return relativePath; + return path.toUri().getPath(); } private long parseContentLength(final String contentLength) { @@ -1202,18 +1450,6 @@ private boolean parseIsDirectory(final String resourceType) { && resourceType.equalsIgnoreCase(AbfsHttpConstants.DIRECTORY); } - private long parseLastModifiedTime(final String lastModifiedTime) { - long parsedTime = 0; - try { - Date utcDate = new SimpleDateFormat(DATE_TIME_PATTERN, Locale.US).parse(lastModifiedTime); - parsedTime = utcDate.getTime(); - } catch (ParseException e) { - LOG.error("Failed to parse the date {}", lastModifiedTime); - } finally { - return parsedTime; - } - } - private String convertXmsPropertiesToCommaSeparatedString(final Hashtable properties) throws CharacterCodingException { StringBuilder commaSeparatedProperties = new StringBuilder(); @@ -1386,4 +1622,15 @@ public String toString() { AbfsClient getClient() { return this.client; } -} \ No newline at end of file + + @VisibleForTesting + void setClient(AbfsClient client) { + this.client = client; + } + + @VisibleForTesting + void setNamespaceEnabled(Trilean isNamespaceEnabled){ + this.isNamespaceEnabled = isNamespaceEnabled; + } + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java index c6ade9cb99d75..184657e7d66ad 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java @@ -40,12 +40,16 @@ public final class AbfsHttpConstants { public static final String CHECK_ACCESS = "checkAccess"; public static final String GET_STATUS = "getStatus"; public static final String DEFAULT_TIMEOUT = "90"; + public static final String APPEND_BLOB_TYPE = "appendblob"; public static final String TOKEN_VERSION = "2"; + public static final String JAVA_VENDOR = "java.vendor"; public static final String JAVA_VERSION = "java.version"; public static final String OS_NAME = "os.name"; public static final String OS_VERSION = "os.version"; + public static final String OS_ARCH = "os.arch"; + public static final String APN_VERSION = "APN/1.0"; public static final String CLIENT_VERSION = "Azure Blob FS/" + VersionInfo.getVersion(); // Abfs Http Verb @@ -71,6 +75,8 @@ public final class AbfsHttpConstants { public static final String SEMICOLON = ";"; public static final String AT = "@"; public static final String HTTP_HEADER_PREFIX = "x-ms-"; + public static final String HASH = "#"; + public static final String TRUE = "true"; public static final String PLUS_ENCODE = "%20"; public static final String FORWARD_SLASH_ENCODE = "%2F"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java index a63e95353497f..585786491236e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileSystem; /** * Responsible to keep all the Azure Blob File System configurations keys in Hadoop configuration file. @@ -27,6 +28,13 @@ @InterfaceAudience.Public @InterfaceStability.Evolving public final class ConfigurationKeys { + + /** + * Config to specify if the configured account is HNS enabled or not. If + * this config is not set, getacl call is made on account filesystem root + * path to determine HNS status. + */ + public static final String FS_AZURE_ACCOUNT_IS_HNS_ENABLED = "fs.azure.account.hns.enabled"; public static final String FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME = "fs.azure.account.key"; public static final String FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME_REGX = "fs\\.azure\\.account\\.key\\.(.*)"; public static final String FS_AZURE_SECURE_MODE = "fs.azure.secure.mode"; @@ -36,10 +44,30 @@ public final class ConfigurationKeys { public static final String AZURE_MAX_BACKOFF_INTERVAL = "fs.azure.io.retry.max.backoff.interval"; public static final String AZURE_BACKOFF_INTERVAL = "fs.azure.io.retry.backoff.interval"; public static final String AZURE_MAX_IO_RETRIES = "fs.azure.io.retry.max.retries"; + public static final String AZURE_CUSTOM_TOKEN_FETCH_RETRY_COUNT = "fs.azure.custom.token.fetch.retry.count"; + + // Retry strategy for getToken calls + public static final String AZURE_OAUTH_TOKEN_FETCH_RETRY_COUNT = "fs.azure.oauth.token.fetch.retry.max.retries"; + public static final String AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF = "fs.azure.oauth.token.fetch.retry.min.backoff.interval"; + public static final String AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF = "fs.azure.oauth.token.fetch.retry.max.backoff.interval"; + public static final String AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF = "fs.azure.oauth.token.fetch.retry.delta.backoff"; // Read and write buffer sizes defined by the user + public static final String AZURE_WRITE_MAX_CONCURRENT_REQUESTS = "fs.azure.write.max.concurrent.requests"; + public static final String AZURE_WRITE_MAX_REQUESTS_TO_QUEUE = "fs.azure.write.max.requests.to.queue"; public static final String AZURE_WRITE_BUFFER_SIZE = "fs.azure.write.request.size"; + /** If the data size written by Hadoop app is small, i.e. data size : + * (a) before any of HFlush/HSync call is made or + * (b) between 2 HFlush/Hsync API calls + * is less than write buffer size, 2 separate calls, one for append and + * another for flush are made. + * By enabling the small write optimization, a single call will be made to + * perform both append and flush operations and hence reduce request count. 
+ */ + public static final String AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION = "fs.azure.write.enableappendwithflush"; public static final String AZURE_READ_BUFFER_SIZE = "fs.azure.read.request.size"; + public static final String AZURE_READ_SMALL_FILES_COMPLETELY = "fs.azure.read.smallfilescompletely"; + public static final String AZURE_READ_OPTIMIZE_FOOTER_READ = "fs.azure.read.optimizefooterread"; public static final String AZURE_BLOCK_SIZE_PROPERTY_NAME = "fs.azure.block.size"; public static final String AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME = "fs.azure.block.location.impersonatedhost"; public static final String AZURE_CONCURRENT_CONNECTION_VALUE_OUT = "fs.azure.concurrentRequestCount.out"; @@ -51,7 +79,16 @@ public final class ConfigurationKeys { public static final String FS_AZURE_ENABLE_AUTOTHROTTLING = "fs.azure.enable.autothrottling"; public static final String FS_AZURE_ALWAYS_USE_HTTPS = "fs.azure.always.use.https"; public static final String FS_AZURE_ATOMIC_RENAME_KEY = "fs.azure.atomic.rename.key"; + /** This config ensures that during create overwrite an existing file will be + * overwritten only if there is a match on the eTag of existing file. + */ + public static final String FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE = "fs.azure.enable.conditional.create.overwrite"; + /** Provides a config to provide comma separated path prefixes on which Appendblob based files are created + * Default is empty. **/ + public static final String FS_AZURE_APPEND_BLOB_KEY = "fs.azure.appendblob.directories"; public static final String FS_AZURE_READ_AHEAD_QUEUE_DEPTH = "fs.azure.readaheadqueue.depth"; + public static final String FS_AZURE_ALWAYS_READ_BUFFER_SIZE = "fs.azure.read.alwaysReadBufferSize"; + public static final String FS_AZURE_READ_AHEAD_BLOCK_SIZE = "fs.azure.read.readahead.blocksize"; /** Provides a config control to enable or disable ABFS Flush operations - * HFlush and HSync. Default is true. **/ public static final String FS_AZURE_ENABLE_FLUSH = "fs.azure.enable.flush"; @@ -62,6 +99,8 @@ public final class ConfigurationKeys { * Default value of this config is true. **/ public static final String FS_AZURE_DISABLE_OUTPUTSTREAM_FLUSH = "fs.azure.disable.outputstream.flush"; public static final String FS_AZURE_USER_AGENT_PREFIX_KEY = "fs.azure.user.agent.prefix"; + public static final String FS_AZURE_CLUSTER_NAME = "fs.azure.cluster.name"; + public static final String FS_AZURE_CLUSTER_TYPE = "fs.azure.cluster.type"; public static final String FS_AZURE_SSL_CHANNEL_MODE_KEY = "fs.azure.ssl.channel.mode"; /** Provides a config to enable/disable the checkAccess API. * By default this will be @@ -92,6 +131,8 @@ public final class ConfigurationKeys { public static final String FS_AZURE_SKIP_SUPER_USER_REPLACEMENT = "fs.azure.identity.transformer.skip.superuser.replacement"; public static final String AZURE_KEY_ACCOUNT_KEYPROVIDER = "fs.azure.account.keyprovider"; public static final String AZURE_KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT = "fs.azure.shellkeyprovider.script"; + /** Setting this true will make the driver use it's own RemoteIterator implementation */ + public static final String FS_AZURE_ENABLE_ABFS_LIST_ITERATOR = "fs.azure.enable.abfslistiterator"; /** End point of ABFS account: {@value}. 
*/ public static final String AZURE_ABFS_ENDPOINT = "fs.azure.abfs.endpoint"; @@ -132,5 +173,21 @@ public static String accountProperty(String property, String account) { /** Key for SAS token provider **/ public static final String FS_AZURE_SAS_TOKEN_PROVIDER_TYPE = "fs.azure.sas.token.provider.type"; + /** For performance, AbfsInputStream/AbfsOutputStream re-use SAS tokens until the expiry is within this number of seconds. **/ + public static final String FS_AZURE_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS = "fs.azure.sas.token.renew.period.for.streams"; + + /** Key to enable custom identity transformation. */ + public static final String FS_AZURE_IDENTITY_TRANSFORM_CLASS = "fs.azure.identity.transformer.class"; + /** Key for Local User to Service Principal file location. */ + public static final String FS_AZURE_LOCAL_USER_SP_MAPPING_FILE_PATH = "fs.azure.identity.transformer.local.service.principal.mapping.file.path"; + /** Key for Local Group to Service Group file location. */ + public static final String FS_AZURE_LOCAL_GROUP_SG_MAPPING_FILE_PATH = "fs.azure.identity.transformer.local.service.group.mapping.file.path"; + /** + * Optional config to enable a lock free pread which will bypass buffer in AbfsInputStream. + * This is not a config which can be set at cluster level. It can be used as + * an option on FutureDataInputStreamBuilder. + * @see FileSystem#openFile(org.apache.hadoop.fs.Path) + */ + public static final String FS_AZURE_BUFFERED_PREAD_DISABLE = "fs.azure.buffered.pread.disable"; private ConfigurationKeys() {} } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java index c6b308ed5f889..9b760c472a9ad 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java @@ -22,31 +22,51 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; + /** * Responsible to keep all the Azure Blob File System related configurations. */ @InterfaceAudience.Public @InterfaceStability.Evolving public final class FileSystemConfigurations { + + public static final String DEFAULT_FS_AZURE_ACCOUNT_IS_HNS_ENABLED = ""; + public static final String USER_HOME_DIRECTORY_PREFIX = "/user"; + private static final int SIXTY_SECONDS = 60 * 1000; + // Retry parameter defaults. public static final int DEFAULT_MIN_BACKOFF_INTERVAL = 3 * 1000; // 3s public static final int DEFAULT_MAX_BACKOFF_INTERVAL = 30 * 1000; // 30s public static final int DEFAULT_BACKOFF_INTERVAL = 3 * 1000; // 3s public static final int DEFAULT_MAX_RETRY_ATTEMPTS = 30; + public static final int DEFAULT_CUSTOM_TOKEN_FETCH_RETRY_COUNT = 3; + + // Retry parameter defaults. 
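The fs.azure.buffered.pread.disable key documented above is consumed per stream through the openFile() builder rather than from cluster configuration (see populateAbfsInputStreamContext earlier in this patch). A minimal sketch of how a client might pass it together with a few of the other new account-level keys; the account URI, path, and values are illustrative placeholders, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AbfsOpenFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Account-level tuning keys added by this patch; values are illustrative.
    conf.setBoolean("fs.azure.enable.conditional.create.overwrite", true);
    conf.set("fs.azure.appendblob.directories", "/logs");
    conf.setBoolean("fs.azure.write.enableappendwithflush", false);

    // Hypothetical account URI and file path.
    Path path = new Path("abfs://container@account.dfs.core.windows.net/data/part-0000");
    FileSystem fs = path.getFileSystem(conf);

    // Per-stream option: bypass the AbfsInputStream buffer for positional reads.
    try (FSDataInputStream in = fs.openFile(path)
        .opt("fs.azure.buffered.pread.disable", true)
        .build()
        .get()) {
      byte[] buf = new byte[4096];
      int read = in.read(0L, buf, 0, buf.length); // positional read
      System.out.println("bytes read: " + read);
    }
  }
}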
+ public static final int DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_ATTEMPTS = 5; + public static final int DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF_INTERVAL = 0; + public static final int DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF_INTERVAL = SIXTY_SECONDS; + public static final int DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF = 2; - private static final int ONE_KB = 1024; - private static final int ONE_MB = ONE_KB * ONE_KB; + public static final int ONE_KB = 1024; + public static final int ONE_MB = ONE_KB * ONE_KB; // Default upload and download buffer size public static final int DEFAULT_WRITE_BUFFER_SIZE = 8 * ONE_MB; // 8 MB + public static final int APPENDBLOB_MAX_WRITE_BUFFER_SIZE = 4 * ONE_MB; // 4 MB + public static final boolean DEFAULT_AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION = false; public static final int DEFAULT_READ_BUFFER_SIZE = 4 * ONE_MB; // 4 MB + public static final boolean DEFAULT_READ_SMALL_FILES_COMPLETELY = false; + public static final boolean DEFAULT_OPTIMIZE_FOOTER_READ = false; + public static final boolean DEFAULT_ALWAYS_READ_BUFFER_SIZE = false; + public static final int DEFAULT_READ_AHEAD_BLOCK_SIZE = 4 * ONE_MB; public static final int MIN_BUFFER_SIZE = 16 * ONE_KB; // 16 KB public static final int MAX_BUFFER_SIZE = 100 * ONE_MB; // 100 MB public static final long MAX_AZURE_BLOCK_SIZE = 256 * 1024 * 1024L; // changing default abfs blocksize to 256MB public static final String AZURE_BLOCK_LOCATION_HOST_DEFAULT = "localhost"; - public static final int DEFAULT_AZURE_LIST_MAX_RESULTS = 500; + public static final int DEFAULT_AZURE_LIST_MAX_RESULTS = 5000; public static final int MAX_CONCURRENT_READ_THREADS = 12; public static final int MAX_CONCURRENT_WRITE_THREADS = 8; @@ -55,8 +75,11 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_AZURE_SKIP_USER_GROUP_METADATA_DURING_INITIALIZATION = false; public static final String DEFAULT_FS_AZURE_ATOMIC_RENAME_DIRECTORIES = "/hbase"; + public static final boolean DEFAULT_FS_AZURE_ENABLE_CONDITIONAL_CREATE_OVERWRITE = true; + public static final String DEFAULT_FS_AZURE_APPEND_BLOB_DIRECTORIES = ""; public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1; + public static final boolean DEFAULT_ENABLE_FLUSH = true; public static final boolean DEFAULT_DISABLE_OUTPUTSTREAM_FLUSH = true; public static final boolean DEFAULT_ENABLE_AUTOTHROTTLING = true; @@ -68,8 +91,17 @@ public final class FileSystemConfigurations { public static final boolean DEFAULT_ENABLE_HTTPS = true; public static final boolean DEFAULT_USE_UPN = false; - public static final boolean DEFAULT_ENABLE_CHECK_ACCESS = false; + public static final boolean DEFAULT_ENABLE_CHECK_ACCESS = true; public static final boolean DEFAULT_ABFS_LATENCY_TRACK = false; + public static final long DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS = 120; + + public static final String DEFAULT_FS_AZURE_USER_AGENT_PREFIX = EMPTY_STRING; + public static final String DEFAULT_VALUE_UNKNOWN = "UNKNOWN"; + + public static final boolean DEFAULT_DELETE_CONSIDERED_IDEMPOTENT = true; + public static final int DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS = 5 * 60 * 1000; // 5 mins + + public static final boolean DEFAULT_ENABLE_ABFS_LIST_ITERATOR = true; private FileSystemConfigurations() {} } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java index 9f735f729cb56..8a4ca90f35870 
100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java @@ -36,8 +36,10 @@ public final class HttpQueryParams { public static final String QUERY_PARAM_POSITION = "position"; public static final String QUERY_PARAM_TIMEOUT = "timeout"; public static final String QUERY_PARAM_RETAIN_UNCOMMITTED_DATA = "retainUncommittedData"; + public static final String QUERY_PARAM_FLUSH = "flush"; public static final String QUERY_PARAM_CLOSE = "close"; public static final String QUERY_PARAM_UPN = "upn"; + public static final String QUERY_PARAM_BLOBTYPE = "blobtype"; private HttpQueryParams() {} } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java index 73b98942d026a..61b625657c14e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AbfsRestOperationException.java @@ -87,7 +87,7 @@ private static String formatMessage(final AbfsHttpOperation abfsHttpOperation) { "Operation failed: \"%1$s\", %2$s, HEAD, %3$s", abfsHttpOperation.getStatusDescription(), abfsHttpOperation.getStatusCode(), - abfsHttpOperation.getUrl().toString()); + abfsHttpOperation.getSignatureMaskedUrl()); } return String.format( @@ -95,7 +95,7 @@ private static String formatMessage(final AbfsHttpOperation abfsHttpOperation) { abfsHttpOperation.getStatusDescription(), abfsHttpOperation.getStatusCode(), abfsHttpOperation.getMethod(), - abfsHttpOperation.getUrl().toString(), + abfsHttpOperation.getSignatureMaskedUrl(), abfsHttpOperation.getStorageErrorCode(), // Remove break line to ensure the request id and timestamp can be shown in console. abfsHttpOperation.getStorageErrorMessage().replaceAll("\\n", " ")); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/ConcurrentWriteOperationDetectedException.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/ConcurrentWriteOperationDetectedException.java new file mode 100644 index 0000000000000..79813ddfe6400 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/ConcurrentWriteOperationDetectedException.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
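The file beginning above introduces ConcurrentWriteOperationDetectedException, which the conditional create-overwrite flow added to AzureBlobFileSystemStore throws when two writers race on the same path. How the exception reaches FileSystem.create() callers is not spelled out in this hunk, so the sketch below is only a hypothetical application-side reaction, assuming the exception surfaces as, or wrapped inside, an IOException; retry count and sleep are arbitrary:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.ConcurrentWriteOperationDetectedException;

public final class SingleWriterCreate {
  /** Attempt an overwrite-create, backing off when another writer is detected. */
  public static FSDataOutputStream createWithBackoff(FileSystem fs, Path path)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        return fs.create(path, true /* overwrite */);
      } catch (IOException e) {
        boolean concurrentWriter =
            e instanceof ConcurrentWriteOperationDetectedException
                || e.getCause() instanceof ConcurrentWriteOperationDetectedException;
        if (!concurrentWriter || attempt == 3) {
          throw e;
        }
        Thread.sleep(1000L * attempt); // simple linear backoff
      }
    }
  }
}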
+ */ + +package org.apache.hadoop.fs.azurebfs.contracts.exceptions; + +/** + * Thrown when a concurrent write operation is detected. + */ +@org.apache.hadoop.classification.InterfaceAudience.Public +@org.apache.hadoop.classification.InterfaceStability.Evolving +public class ConcurrentWriteOperationDetectedException + extends AzureBlobFileSystemException { + + public ConcurrentWriteOperationDetectedException(String message) { + super(message); + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/TrileanConversionException.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/TrileanConversionException.java new file mode 100644 index 0000000000000..87eb05cdc6270 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/TrileanConversionException.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.contracts.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown when tried to convert Trilean.UNKNOWN to boolean. Only Trilean.TRUE + * and Trilean.FALSE can be converted to boolean. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public final class TrileanConversionException + extends AzureBlobFileSystemException { + public TrileanConversionException() { + super("Cannot convert Trilean.UNKNOWN to boolean"); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java new file mode 100644 index 0000000000000..fb4d29f87949a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/AppendRequestParameters.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs.contracts.services; + +/** + * Saves the different request parameters for append + */ +public class AppendRequestParameters { + public enum Mode { + APPEND_MODE, + FLUSH_MODE, + FLUSH_CLOSE_MODE + } + + private final long position; + private final int offset; + private final int length; + private final Mode mode; + private final boolean isAppendBlob; + + public AppendRequestParameters(final long position, + final int offset, + final int length, + final Mode mode, + final boolean isAppendBlob) { + this.position = position; + this.offset = offset; + this.length = length; + this.mode = mode; + this.isAppendBlob = isAppendBlob; + } + + public long getPosition() { + return this.position; + } + + public int getoffset() { + return this.offset; + } + + public int getLength() { + return this.length; + } + + public Mode getMode() { + return this.mode; + } + + public boolean isAppendBlob() { + return this.isAppendBlob; + } + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java index 1de9dfaeeb910..cdf3decdc98bc 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.azurebfs.contracts.services; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; import org.codehaus.jackson.annotate.JsonProperty; import org.apache.hadoop.classification.InterfaceStability; @@ -26,6 +27,7 @@ * The ListResultEntrySchema model. */ @InterfaceStability.Evolving +@JsonIgnoreProperties(ignoreUnknown = true) public class ListResultEntrySchema { /** * The name property. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java index 32597423c86ff..e3519fb429bff 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultSchema.java @@ -20,6 +20,7 @@ import java.util.List; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; import org.codehaus.jackson.annotate.JsonProperty; import org.apache.hadoop.classification.InterfaceStability; @@ -28,6 +29,7 @@ * The ListResultSchema model. */ @InterfaceStability.Evolving +@JsonIgnoreProperties(ignoreUnknown = true) public class ListResultSchema { /** * The paths property. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/Trilean.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/Trilean.java new file mode 100644 index 0000000000000..dc5f43913fb09 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/Trilean.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
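AppendRequestParameters above bundles the arguments for a single append request. A short sketch of constructing parameters for a plain append versus one whose mode also asks the service to flush in the same call (the shape used by the small-write optimization described earlier); positions, offsets, and lengths here are arbitrary:

import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters;
import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode;

public final class AppendParamsExample {
  public static void main(String[] args) {
    // A plain append of 4 KB taken from the start of the local buffer.
    AppendRequestParameters append = new AppendRequestParameters(
        0L, 0, 4096, Mode.APPEND_MODE, false /* not an append blob */);

    // A small write that is appended and flushed in one call.
    AppendRequestParameters appendAndFlush = new AppendRequestParameters(
        4096L, 0, 512, Mode.FLUSH_MODE, false);

    System.out.println(append.getMode() + " at position " + append.getPosition());
    System.out.println(appendAndFlush.getMode() + " length " + appendAndFlush.getLength());
  }
}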
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.enums; + +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException; + +/** + * Enum to represent 3 values, TRUE, FALSE and UNKNOWN. Can be used where + * boolean is not enough to hold the information. + */ +public enum Trilean { + + FALSE, TRUE, UNKNOWN; + + private static final String TRUE_STR = "true"; + private static final String FALSE_STR = "false"; + + /** + * Converts boolean to Trilean. + * + * @param isTrue the boolean to convert. + * @return the corresponding Trilean for the passed boolean isTrue. + */ + public static Trilean getTrilean(final boolean isTrue) { + if (isTrue) { + return Trilean.TRUE; + } + + return Trilean.FALSE; + } + + /** + * Converts String to Trilean. + * + * @param str the string to convert. + * @return the corresponding Trilean for the passed string str. + */ + public static Trilean getTrilean(String str) { + if (TRUE_STR.equalsIgnoreCase(str)) { + return Trilean.TRUE; + } + + if (FALSE_STR.equalsIgnoreCase(str)) { + return Trilean.FALSE; + } + + return Trilean.UNKNOWN; + } + + /** + * Converts the Trilean enum to boolean. + * + * @return the corresponding boolean. + * @throws TrileanConversionException when tried to convert Trilean.UNKNOWN. + */ + public boolean toBoolean() throws TrileanConversionException { + if (this == Trilean.UNKNOWN) { + throw new TrileanConversionException(); + } + + return Boolean.valueOf(this.name()); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/package-info.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/package-info.java new file mode 100644 index 0000000000000..b2a9b0f468de3 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/enums/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
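A short sketch of how the Trilean enum above behaves, for example when backed by the new fs.azure.account.hns.enabled setting whose default is an empty string and therefore maps to UNKNOWN; the configuration lookup is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException;
import org.apache.hadoop.fs.azurebfs.enums.Trilean;

public final class TrileanExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Unset (empty) maps to UNKNOWN; "true"/"false" in any case map to TRUE/FALSE.
    Trilean hnsEnabled = Trilean.getTrilean(conf.get("fs.azure.account.hns.enabled", ""));

    System.out.println(hnsEnabled);                  // UNKNOWN when the key is not configured
    System.out.println(Trilean.getTrilean("True"));  // TRUE
    System.out.println(Trilean.getTrilean(true));    // TRUE

    try {
      boolean resolved = hnsEnabled.toBoolean();
      System.out.println("HNS enabled: " + resolved);
    } catch (TrileanConversionException e) {
      // UNKNOWN cannot be converted; the store then probes the account root (getAcl) instead.
      System.out.println("HNS state unknown, must be discovered from the service");
    }
  }
}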
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +package org.apache.hadoop.fs.azurebfs.enums; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java index 9cfe2bc12ed10..a2cd292b0b230 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/SASTokenProvider.java @@ -32,22 +32,23 @@ @InterfaceStability.Unstable public interface SASTokenProvider { - String CONCAT_SOURCE_OPERATION = "concat-source"; - String CONCAT_TARGET_OPERATION = "concat-target"; - String CREATEFILE_OPERATION = "create"; + String CHECK_ACCESS_OPERATION = "check-access"; + String CREATE_DIRECTORY_OPERATION = "create-directory"; + String CREATE_FILE_OPERATION = "create-file"; String DELETE_OPERATION = "delete"; - String EXECUTE_OPERATION = "execute"; - String GETACL_OPERATION = "getaclstatus"; - String GETFILESTATUS_OPERATION = "getfilestatus"; - String LISTSTATUS_OPERATION = "liststatus"; - String MKDIR_OPERATION = "mkdir"; + String DELETE_RECURSIVE_OPERATION = "delete-recursive"; + String GET_ACL_OPERATION = "get-acl"; + String GET_STATUS_OPERATION = "get-status"; + String GET_PROPERTIES_OPERATION = "get-properties"; + String LIST_OPERATION = "list"; String READ_OPERATION = "read"; String RENAME_SOURCE_OPERATION = "rename-source"; String RENAME_DESTINATION_OPERATION = "rename-destination"; - String SETACL_OPERATION = "setacl"; - String SETOWNER_OPERATION = "setowner"; - String SETPERMISSION_OPERATION = "setpermission"; - String APPEND_OPERATION = "write"; + String SET_ACL_OPERATION = "set-acl"; + String SET_OWNER_OPERATION = "set-owner"; + String SET_PERMISSION_OPERATION = "set-permission"; + String SET_PROPERTIES_OPERATION = "set-properties"; + String WRITE_OPERATION = "write"; /** * Initialize authorizer for Azure Blob File System. 
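The SASTokenProvider hunk above renames the operation constants handed to a user-supplied provider. The skeleton below sketches an implementation keyed on those constants; the initialize and getSASToken signatures are assumed from the surrounding interface rather than shown in this hunk, and the token minting is a placeholder:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider;

/**
 * Illustrative provider that hands out a broader token for write-style
 * operations and a narrower one otherwise. Verify the method signatures
 * against the actual interface before use.
 */
public class SampleSASTokenProvider implements SASTokenProvider {
  private String account;

  @Override
  public void initialize(Configuration configuration, String accountName) throws IOException {
    this.account = accountName;
  }

  @Override
  public String getSASToken(String accountName, String fileSystem, String path, String operation)
      throws IOException {
    switch (operation) {
      case SASTokenProvider.CREATE_FILE_OPERATION:
      case SASTokenProvider.WRITE_OPERATION:
      case SASTokenProvider.SET_ACL_OPERATION:
        return mintToken("rw"); // hypothetical helper
      default:
        return mintToken("r");
    }
  }

  private String mintToken(String scope) {
    // Placeholder: a real provider would generate or fetch a service SAS here.
    return "sig=<generated-for-" + account + "-" + scope + ">";
  }
}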
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java index 93c40a7120677..49f90feb22e95 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java @@ -18,22 +18,25 @@ package org.apache.hadoop.fs.azurebfs.oauth2; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; +import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Date; import java.util.Hashtable; import java.util.Map; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonParser; import org.codehaus.jackson.JsonToken; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.azurebfs.services.AbfsIoUtils; @@ -52,13 +55,20 @@ public final class AzureADAuthenticator { private static final Logger LOG = LoggerFactory.getLogger(AzureADAuthenticator.class); private static final String RESOURCE_NAME = "https://storage.azure.com/"; + private static final String SCOPE = "https://storage.azure.com/.default"; private static final int CONNECT_TIMEOUT = 30 * 1000; private static final int READ_TIMEOUT = 30 * 1000; + private static ExponentialRetryPolicy tokenFetchRetryPolicy; + private AzureADAuthenticator() { // no operation } + public static void init(AbfsConfiguration abfsConfiguration) { + tokenFetchRetryPolicy = abfsConfiguration.getOauthTokenFetchRetryPolicy(); + } + /** * gets Azure Active Directory token using the user ID and password of * a service principal (that is, Web App in Azure Active Directory). 
@@ -80,14 +90,18 @@ private AzureADAuthenticator() { * @throws IOException throws IOException if there is a failure in connecting to Azure AD */ public static AzureADToken getTokenUsingClientCreds(String authEndpoint, - String clientId, String clientSecret) - throws IOException { + String clientId, String clientSecret) throws IOException { Preconditions.checkNotNull(authEndpoint, "authEndpoint"); Preconditions.checkNotNull(clientId, "clientId"); Preconditions.checkNotNull(clientSecret, "clientSecret"); + boolean isVersion2AuthenticationEndpoint = authEndpoint.contains("/oauth2/v2.0/"); QueryParams qp = new QueryParams(); - qp.add("resource", RESOURCE_NAME); + if (isVersion2AuthenticationEndpoint) { + qp.add("scope", SCOPE); + } else { + qp.add("resource", RESOURCE_NAME); + } qp.add("grant_type", "client_credentials"); qp.add("client_id", clientId); qp.add("client_secret", clientSecret); @@ -230,12 +244,23 @@ public String getMessage() { final StringBuilder sb = new StringBuilder(); sb.append("HTTP Error "); sb.append(httpErrorCode); - sb.append("; url='").append(url).append('\''); - sb.append(' '); + if (!url.isEmpty()) { + sb.append("; url='").append(url).append('\'').append(' '); + } + sb.append(super.getMessage()); - sb.append("; requestId='").append(requestId).append('\''); - sb.append("; contentType='").append(contentType).append('\''); - sb.append("; response '").append(body).append('\''); + if (!requestId.isEmpty()) { + sb.append("; requestId='").append(requestId).append('\''); + } + + if (!contentType.isEmpty()) { + sb.append("; contentType='").append(contentType).append('\''); + } + + if (!body.isEmpty()) { + sb.append("; response '").append(body).append('\''); + } + return sb.toString(); } } @@ -266,13 +291,14 @@ private static AzureADToken getTokenCall(String authEndpoint, String body, Hashtable headers, String httpMethod, boolean isMsi) throws IOException { AzureADToken token = null; - ExponentialRetryPolicy retryPolicy - = new ExponentialRetryPolicy(3, 0, 1000, 2); int httperror = 0; IOException ex = null; boolean succeeded = false; + boolean isRecoverableFailure = true; int retryCount = 0; + boolean shouldRetry; + LOG.trace("First execution of REST operation getTokenSingleCall"); do { httperror = 0; ex = null; @@ -282,17 +308,38 @@ private static AzureADToken getTokenCall(String authEndpoint, String body, httperror = e.httpErrorCode; ex = e; } catch (IOException e) { - ex = e; + httperror = -1; + isRecoverableFailure = isRecoverableFailure(e); + ex = new HttpException(httperror, "", String + .format("AzureADAuthenticator.getTokenCall threw %s : %s", + e.getClass().getTypeName(), e.getMessage()), authEndpoint, "", + ""); } succeeded = ((httperror == 0) && (ex == null)); + shouldRetry = !succeeded && isRecoverableFailure + && tokenFetchRetryPolicy.shouldRetry(retryCount, httperror); retryCount++; - } while (!succeeded && retryPolicy.shouldRetry(retryCount, httperror)); + if (shouldRetry) { + LOG.debug("Retrying getTokenSingleCall. 
RetryCount = {}", retryCount); + try { + Thread.sleep(tokenFetchRetryPolicy.getRetryInterval(retryCount)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + } while (shouldRetry); if (!succeeded) { throw ex; } return token; } + private static boolean isRecoverableFailure(IOException e) { + return !(e instanceof MalformedURLException + || e instanceof FileNotFoundException); + } + private static AzureADToken getTokenSingleCall(String authEndpoint, String payload, Hashtable headers, String httpMethod, boolean isMsi) diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/ClientCredsTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/ClientCredsTokenProvider.java index 9a46018ec6217..d432e3b2ce0c5 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/ClientCredsTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/ClientCredsTokenProvider.java @@ -20,7 +20,7 @@ import java.io.IOException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/CustomTokenProviderAdapter.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/CustomTokenProviderAdapter.java index 37cfa6f1d2910..14914101e5cca 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/CustomTokenProviderAdapter.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/CustomTokenProviderAdapter.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.net.URI; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.azurebfs.extensions.BoundDTExtension; import org.apache.hadoop.fs.azurebfs.extensions.CustomTokenProviderAdaptee; import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper; +import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator.HttpException; /** * Provides tokens based on custom implementation, following the Adapter Design @@ -38,6 +39,7 @@ public final class CustomTokenProviderAdapter extends AccessTokenProvider implements BoundDTExtension { + private final int fetchTokenRetryCount; private CustomTokenProviderAdaptee adaptee; private static final Logger LOG = LoggerFactory.getLogger(AccessTokenProvider.class); @@ -45,17 +47,57 @@ public final class CustomTokenProviderAdapter extends AccessTokenProvider * Constructs a token provider based on the custom token provider. 
* * @param adaptee the custom token provider + * @param customTokenFetchRetryCount max retry count for customTokenFetch */ - public CustomTokenProviderAdapter(CustomTokenProviderAdaptee adaptee) { + public CustomTokenProviderAdapter(CustomTokenProviderAdaptee adaptee, int customTokenFetchRetryCount) { Preconditions.checkNotNull(adaptee, "adaptee"); this.adaptee = adaptee; + fetchTokenRetryCount = customTokenFetchRetryCount; } protected AzureADToken refreshToken() throws IOException { LOG.debug("AADToken: refreshing custom based token"); AzureADToken azureADToken = new AzureADToken(); - azureADToken.setAccessToken(adaptee.getAccessToken()); + + String accessToken = null; + + Exception ex; + boolean succeeded = false; + // Custom token providers should have their own retry policies, + // Providing a linear retry option for the the retry count + // mentioned in config "fs.azure.custom.token.fetch.retry.count" + int retryCount = fetchTokenRetryCount; + do { + ex = null; + try { + accessToken = adaptee.getAccessToken(); + LOG.trace("CustomTokenProvider Access token fetch was successful with retry count {}", + (fetchTokenRetryCount - retryCount)); + } catch (Exception e) { + LOG.debug("CustomTokenProvider Access token fetch failed with retry count {}", + (fetchTokenRetryCount - retryCount)); + ex = e; + } + + succeeded = (ex == null); + retryCount--; + } while (!succeeded && (retryCount) >= 0); + + if (!succeeded) { + HttpException httpEx = new HttpException( + -1, + "", + String.format("CustomTokenProvider getAccessToken threw %s : %s", + ex.getClass().getTypeName(), ex.getMessage()), + "", + "", + "" + ); + throw httpEx; + } + + azureADToken.setAccessToken(accessToken); azureADToken.setExpiry(adaptee.getExpiryTime()); return azureADToken; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformer.java index 6844afb9b2a54..dea2f96b60f0a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformer.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.Locale; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +42,7 @@ /** * Perform transformation for Azure Active Directory identities used in owner, group and acls. */ -public class IdentityTransformer { +public class IdentityTransformer implements IdentityTransformerInterface { private static final Logger LOG = LoggerFactory.getLogger(IdentityTransformer.class); private boolean isSecure; @@ -100,7 +100,8 @@ public IdentityTransformer(Configuration configuration) throws IOException { * @param localIdentity the local user or group, should be parsed from UserGroupInformation. * @return owner or group after transformation. * */ - public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) { + public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) + throws IOException { if (originalIdentity == null) { originalIdentity = localIdentity; // localIdentity might be a full name, so continue the transformation. 
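Editorial note: tying back to the getTokenUsingClientCreds change at the top of this hunk, the request switches from a "resource" parameter to a "scope" parameter when the endpoint is an AAD v2.0 endpoint. A small illustrative sketch of that branch; the parameter values shown are assumptions, since the patch only references the SCOPE and RESOURCE_NAME constants.

import java.util.LinkedHashMap;
import java.util.Map;

public final class TokenRequestParamsDemo {

  static Map<String, String> clientCredsParams(String authEndpoint, String clientId, String secret) {
    Map<String, String> qp = new LinkedHashMap<>();
    boolean isV2Endpoint = authEndpoint.contains("/oauth2/v2.0/");
    if (isV2Endpoint) {
      qp.put("scope", "https://storage.azure.com/.default"); // assumed scope value
    } else {
      qp.put("resource", "https://storage.azure.com/");      // assumed resource value
    }
    qp.put("grant_type", "client_credentials");
    qp.put("client_id", clientId);
    qp.put("client_secret", secret);
    return qp;
  }

  public static void main(String[] args) {
    System.out.println(clientCredsParams(
        "https://login.microsoftonline.com/tenant/oauth2/v2.0/token", "app-id", "app-secret").keySet());
  }
}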
@@ -198,7 +199,7 @@ public void transformAclEntriesForSetRequest(final List aclEntries) { if (isInSubstitutionList(name)) { transformedName = servicePrincipalId; } else if (aclEntry.getType().equals(AclEntryType.USER) // case 2: when the owner is a short name - && shouldUseFullyQualifiedUserName(name)) { // of the user principal name (UPN). + && shouldUseFullyQualifiedUserName(name)) { // of the user principal name (UPN). // Notice: for group type ACL entry, if name is shortName. // It won't be converted to Full Name. This is // to make the behavior consistent with HDI. @@ -242,7 +243,8 @@ && shouldUseFullyQualifiedUserName(name)) { // of the user principal * @param localUser local user name * @param localGroup local primary group * */ - public void transformAclEntriesForGetRequest(final List aclEntries, String localUser, String localGroup) { + public void transformAclEntriesForGetRequest(final List aclEntries, String localUser, String localGroup) + throws IOException { if (skipUserIdentityReplacement) { return; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformerInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformerInterface.java new file mode 100644 index 0000000000000..00f93eae30bd4 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/IdentityTransformerInterface.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.oauth2; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.permission.AclEntry; + +/** + * {@code IdentityTransformerInterface} defines the set of translation + * operations that any identity transformer implementation must provide. + */ +public interface IdentityTransformerInterface { + + /** + * Perform identity transformation for the Get request. + * @param originalIdentity the original user or group in the get request. + * @param isUserName indicate whether the input originalIdentity is an owner name or owning group name. + * @param localIdentity the local user or group, should be parsed from UserGroupInformation. + * @return owner or group after transformation. + */ + String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) + throws IOException; + + /** + * Perform Identity transformation when setting owner on a path. + * @param userOrGroup the user or group to be set as owner. + * @return user or group after transformation. + */ + String transformUserOrGroupForSetRequest(String userOrGroup); + + /** + * Perform Identity transformation when calling setAcl(),removeAclEntries() and modifyAclEntries(). 
+ * @param aclEntries list of AclEntry. + */ + void transformAclEntriesForSetRequest(final List aclEntries); + + /** + * Perform Identity transformation when calling GetAclStatus(). + * @param aclEntries list of AclEntry. + * @param localUser local user name. + * @param localGroup local primary group. + */ + void transformAclEntriesForGetRequest(final List aclEntries, String localUser, String localGroup) + throws IOException; +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/LocalIdentityTransformer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/LocalIdentityTransformer.java new file mode 100644 index 0000000000000..5d5371014b761 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/LocalIdentityTransformer.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.oauth2; + +import java.io.IOException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.utils.IdentityHandler; +import org.apache.hadoop.fs.azurebfs.utils.TextFileBasedIdentityHandler; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_LOCAL_USER_SP_MAPPING_FILE_PATH; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_LOCAL_GROUP_SG_MAPPING_FILE_PATH; + + +/** + * A subclass of {@link IdentityTransformer} that translates the AAD to Local + * identity using {@link IdentityHandler}. + * + * {@link TextFileBasedIdentityHandler} is a {@link IdentityHandler} implements + * translation operation which returns identity mapped to AAD identity. + */ +public class LocalIdentityTransformer extends IdentityTransformer { + + private static final Logger LOG = LoggerFactory.getLogger(LocalIdentityTransformer.class); + + private IdentityHandler localToAadIdentityLookup; + + public LocalIdentityTransformer(Configuration configuration) throws IOException { + super(configuration); + this.localToAadIdentityLookup = + new TextFileBasedIdentityHandler(configuration.get(FS_AZURE_LOCAL_USER_SP_MAPPING_FILE_PATH), + configuration.get(FS_AZURE_LOCAL_GROUP_SG_MAPPING_FILE_PATH)); + } + + /** + * Perform identity transformation for the Get request results. + * @param originalIdentity the original user or group in the get request results: FileStatus, AclStatus. + * @param isUserName indicate whether the input originalIdentity is an owner name or owning group name. + * @param localIdentity the local user or group, should be parsed from UserGroupInformation. + * @return local identity. 
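Editorial note: the new LocalIdentityTransformer looks up a local identity for the incoming AAD identity and falls back to the parent IdentityTransformer when no mapping exists. A simplified sketch of that lookup-then-fallback shape, with an in-memory map standing in for TextFileBasedIdentityHandler; class names here are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class FallbackIdentityDemo {

  static class BaseTransformer {
    String transformForGet(String original, String localIdentity) {
      // base rule: if nothing better is known, keep the original (or use the local identity)
      return (original == null || original.isEmpty()) ? localIdentity : original;
    }
  }

  static class MappedTransformer extends BaseTransformer {
    private final Map<String, String> aadToLocal = new HashMap<>();

    MappedTransformer() {
      aadToLocal.put("1234-5678-objectid", "alice"); // hypothetical mapping entry
    }

    @Override
    String transformForGet(String original, String localIdentity) {
      String mapped = aadToLocal.get(original);
      if (mapped == null || mapped.isEmpty()) {
        return super.transformForGet(original, localIdentity); // fall back to the base rules
      }
      return mapped;
    }
  }

  public static void main(String[] args) {
    BaseTransformer t = new MappedTransformer();
    System.out.println(t.transformForGet("1234-5678-objectid", "bob")); // alice
    System.out.println(t.transformForGet("unknown-objectid", "bob"));   // unknown-objectid
  }
}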
+ */ + @Override + public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) + throws IOException { + String localIdentityForOrig = isUserName ? localToAadIdentityLookup.lookupForLocalUserIdentity(originalIdentity) + : localToAadIdentityLookup.lookupForLocalGroupIdentity(originalIdentity); + + if (localIdentityForOrig == null || localIdentityForOrig.isEmpty()) { + return super.transformIdentityForGetRequest(originalIdentity, isUserName, localIdentity); + } + + return localIdentityForOrig; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/RefreshTokenBasedTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/RefreshTokenBasedTokenProvider.java index 1c1bd2b3b5511..4f35d1522d99f 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/RefreshTokenBasedTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/RefreshTokenBasedTokenProvider.java @@ -20,7 +20,7 @@ import java.io.IOException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/UserPasswordTokenProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/UserPasswordTokenProvider.java index 3dad32ec6f51a..3d9d8b865a059 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/UserPasswordTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/UserPasswordTokenProvider.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/security/AbfsDelegationTokenManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/security/AbfsDelegationTokenManager.java index c8d6b803f4650..46d5505217378 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/security/AbfsDelegationTokenManager.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/security/AbfsDelegationTokenManager.java @@ -23,8 +23,8 @@ import java.io.IOException; import java.net.URI; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index 6e1de68b5de51..bfc11a676ae48 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -21,14 +21,17 @@ import java.io.Closeable; import java.io.IOException; import java.io.UnsupportedEncodingException; +import java.net.HttpURLConnection; import 
java.net.MalformedURLException; import java.net.URL; import java.net.URLEncoder; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Locale; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; @@ -42,10 +45,13 @@ import org.apache.hadoop.fs.azurebfs.extensions.ExtensionHelper; import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider; import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; +import org.apache.hadoop.fs.azurebfs.utils.DateTimeUtils; import org.apache.hadoop.io.IOUtils; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.*; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_DELETE_CONSIDERED_IDEMPOTENT; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes.HTTPS_SCHEME; import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.*; import static org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams.*; @@ -57,7 +63,7 @@ public class AbfsClient implements Closeable { public static final Logger LOG = LoggerFactory.getLogger(AbfsClient.class); private final URL baseUrl; private final SharedKeyCredentials sharedKeyCredentials; - private final String xMsVersion = "2018-11-09"; + private final String xMsVersion = "2019-12-12"; private final ExponentialRetryPolicy retryPolicy; private final String filesystem; private final AbfsConfiguration abfsConfiguration; @@ -68,17 +74,17 @@ public class AbfsClient implements Closeable { private final AuthType authType; private AccessTokenProvider tokenProvider; private SASTokenProvider sasTokenProvider; + private final AbfsCounters abfsCounters; private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, - final ExponentialRetryPolicy exponentialRetryPolicy, - final AbfsPerfTracker abfsPerfTracker) { + final AbfsClientContext abfsClientContext) { this.baseUrl = baseUrl; this.sharedKeyCredentials = sharedKeyCredentials; String baseUrlString = baseUrl.toString(); this.filesystem = baseUrlString.substring(baseUrlString.lastIndexOf(FORWARD_SLASH) + 1); this.abfsConfiguration = abfsConfiguration; - this.retryPolicy = exponentialRetryPolicy; + this.retryPolicy = abfsClientContext.getExponentialRetryPolicy(); this.accountName = abfsConfiguration.getAccountName().substring(0, abfsConfiguration.getAccountName().indexOf(AbfsHttpConstants.DOT)); this.authType = abfsConfiguration.getAuthType(accountName); @@ -98,24 +104,23 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCreden } this.userAgent = initializeUserAgent(abfsConfiguration, sslProviderName); - this.abfsPerfTracker = abfsPerfTracker; + this.abfsPerfTracker = abfsClientContext.getAbfsPerfTracker(); + this.abfsCounters = abfsClientContext.getAbfsCounters(); } public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, - final ExponentialRetryPolicy exponentialRetryPolicy, final 
AccessTokenProvider tokenProvider, - final AbfsPerfTracker abfsPerfTracker) { - this(baseUrl, sharedKeyCredentials, abfsConfiguration, exponentialRetryPolicy, abfsPerfTracker); + final AbfsClientContext abfsClientContext) { + this(baseUrl, sharedKeyCredentials, abfsConfiguration, abfsClientContext); this.tokenProvider = tokenProvider; } public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials, final AbfsConfiguration abfsConfiguration, - final ExponentialRetryPolicy exponentialRetryPolicy, final SASTokenProvider sasTokenProvider, - final AbfsPerfTracker abfsPerfTracker) { - this(baseUrl, sharedKeyCredentials, abfsConfiguration, exponentialRetryPolicy, abfsPerfTracker); + final AbfsClientContext abfsClientContext) { + this(baseUrl, sharedKeyCredentials, abfsConfiguration, abfsClientContext); this.sasTokenProvider = sasTokenProvider; } @@ -207,13 +212,12 @@ public AbfsRestOperation listPath(final String relativePath, final boolean recur final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, FILESYSTEM); - abfsUriQueryBuilder.addQuery(QUERY_PARAM_DIRECTORY, relativePath == null ? AbfsHttpConstants.EMPTY_STRING - : relativePath); + abfsUriQueryBuilder.addQuery(QUERY_PARAM_DIRECTORY, getDirectoryQueryParameter(relativePath)); abfsUriQueryBuilder.addQuery(QUERY_PARAM_RECURSIVE, String.valueOf(recursive)); abfsUriQueryBuilder.addQuery(QUERY_PARAM_CONTINUATION, continuation); abfsUriQueryBuilder.addQuery(QUERY_PARAM_MAXRESULTS, String.valueOf(listMaxResults)); abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_UPN, String.valueOf(abfsConfiguration.isUpnUsed())); - appendSASTokenToQuery(relativePath, SASTokenProvider.LISTSTATUS_OPERATION, abfsUriQueryBuilder); + appendSASTokenToQuery(relativePath, SASTokenProvider.LIST_OPERATION, abfsUriQueryBuilder); final URL url = createRequestUrl(abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -261,7 +265,8 @@ public AbfsRestOperation deleteFilesystem() throws AzureBlobFileSystemException } public AbfsRestOperation createPath(final String path, final boolean isFile, final boolean overwrite, - final String permission, final String umask) throws AzureBlobFileSystemException { + final String permission, final String umask, + final boolean isAppendBlob, final String eTag) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); if (!overwrite) { requestHeaders.add(new AbfsHttpHeader(IF_NONE_MATCH, AbfsHttpConstants.STAR)); @@ -275,12 +280,19 @@ public AbfsRestOperation createPath(final String path, final boolean isFile, fin requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_UMASK, umask)); } + if (eTag != null && !eTag.isEmpty()) { + requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.IF_MATCH, eTag)); + } + final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, isFile ? FILE : DIRECTORY); + if (isAppendBlob) { + abfsUriQueryBuilder.addQuery(QUERY_PARAM_BLOBTYPE, APPEND_BLOB_TYPE); + } String operation = isFile - ? SASTokenProvider.CREATEFILE_OPERATION - : SASTokenProvider.MKDIR_OPERATION; + ? 
SASTokenProvider.CREATE_FILE_OPERATION + : SASTokenProvider.CREATE_DIRECTORY_OPERATION; appendSASTokenToQuery(path, operation, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); @@ -320,35 +332,156 @@ public AbfsRestOperation renamePath(String source, final String destination, fin HTTP_METHOD_PUT, url, requestHeaders); - op.execute(); + Instant renameRequestStartTime = Instant.now(); + try { + op.execute(); + } catch (AzureBlobFileSystemException e) { + final AbfsRestOperation idempotencyOp = renameIdempotencyCheckOp( + renameRequestStartTime, op, destination); + if (idempotencyOp.getResult().getStatusCode() + == op.getResult().getStatusCode()) { + // idempotency did not return different result + // throw back the exception + throw e; + } else { + return idempotencyOp; + } + } + + return op; + } + + /** + * Check if the rename request failure is post a retry and if earlier rename + * request might have succeeded at back-end. + * + * If there is a parallel rename activity happening from any other store + * interface, the logic here will detect the rename to have happened due to + * the one initiated from this ABFS filesytem instance as it was retried. This + * should be a corner case hence going ahead with LMT check. + * @param renameRequestStartTime startTime for the rename request + * @param op Rename request REST operation response + * @param destination rename destination path + * @return REST operation response post idempotency check + * @throws AzureBlobFileSystemException if GetFileStatus hits any exception + */ + public AbfsRestOperation renameIdempotencyCheckOp( + final Instant renameRequestStartTime, + final AbfsRestOperation op, + final String destination) throws AzureBlobFileSystemException { + if ((op.isARetriedRequest()) + && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND)) { + // Server has returned HTTP 404, which means rename source no longer + // exists. Check on destination status and if it has a recent LMT timestamp. + // If yes, return success, else fall back to original rename request failure response. + + try { + final AbfsRestOperation destStatusOp = getPathStatus(destination, + false); + if (destStatusOp.getResult().getStatusCode() + == HttpURLConnection.HTTP_OK) { + String lmt = destStatusOp.getResult().getResponseHeader( + HttpHeaderConfigurations.LAST_MODIFIED); + + if (DateTimeUtils.isRecentlyModified(lmt, renameRequestStartTime)) { + LOG.debug("Returning success response from rename idempotency logic"); + return destStatusOp; + } + } + } catch (AzureBlobFileSystemException e) { + // GetFileStatus on the destination failed, return original op + return op; + } + } + return op; } - public AbfsRestOperation append(final String path, final long position, final byte[] buffer, final int offset, - final int length) throws AzureBlobFileSystemException { + public AbfsRestOperation append(final String path, final byte[] buffer, + AppendRequestParameters reqParams, final String cachedSasToken) + throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use // PUT and specify the real method in the X-Http-Method-Override header. 
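Editorial note: renameIdempotencyCheckOp above treats a 404 on a retried rename as success when the destination exists and its Last-Modified timestamp is recent relative to the request start time. A sketch of that recency test; the five-minute window and the helper name are assumptions, since the patch delegates the check to DateTimeUtils.isRecentlyModified.

import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public final class RecentlyModifiedDemo {

  private static final Duration SKEW_WINDOW = Duration.ofMinutes(5); // assumed, not the ABFS default

  // True if the HTTP Last-Modified header is not older than requestStart minus a small skew window.
  static boolean isRecentlyModified(String lastModifiedHeader, Instant requestStart) {
    Instant lmt = ZonedDateTime
        .parse(lastModifiedHeader, DateTimeFormatter.RFC_1123_DATE_TIME)
        .toInstant();
    return !lmt.isBefore(requestStart.minus(SKEW_WINDOW));
  }

  public static void main(String[] args) {
    Instant requestStart = Instant.now().minusSeconds(30);
    String lastModified = DateTimeFormatter.RFC_1123_DATE_TIME
        .format(ZonedDateTime.now(ZoneOffset.UTC));
    System.out.println(isRecentlyModified(lastModified, requestStart)); // true
  }
}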
requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE, - HTTP_METHOD_PATCH)); + HTTP_METHOD_PATCH)); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, APPEND_ACTION); - abfsUriQueryBuilder.addQuery(QUERY_PARAM_POSITION, Long.toString(position)); - appendSASTokenToQuery(path, SASTokenProvider.APPEND_OPERATION, abfsUriQueryBuilder); + abfsUriQueryBuilder.addQuery(QUERY_PARAM_POSITION, Long.toString(reqParams.getPosition())); + + if ((reqParams.getMode() == AppendRequestParameters.Mode.FLUSH_MODE) || ( + reqParams.getMode() == AppendRequestParameters.Mode.FLUSH_CLOSE_MODE)) { + abfsUriQueryBuilder.addQuery(QUERY_PARAM_FLUSH, TRUE); + if (reqParams.getMode() == AppendRequestParameters.Mode.FLUSH_CLOSE_MODE) { + abfsUriQueryBuilder.addQuery(QUERY_PARAM_CLOSE, TRUE); + } + } + + // AbfsInputStream/AbfsOutputStream reuse SAS tokens for better performance + String sasTokenForReuse = appendSASTokenToQuery(path, SASTokenProvider.WRITE_OPERATION, + abfsUriQueryBuilder, cachedSasToken); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( AbfsRestOperationType.Append, + this, + HTTP_METHOD_PUT, + url, + requestHeaders, + buffer, + reqParams.getoffset(), + reqParams.getLength(), + sasTokenForReuse); + try { + op.execute(); + } catch (AzureBlobFileSystemException e) { + if (reqParams.isAppendBlob() + && appendSuccessCheckOp(op, path, + (reqParams.getPosition() + reqParams.getLength()))) { + final AbfsRestOperation successOp = new AbfsRestOperation( + AbfsRestOperationType.Append, this, HTTP_METHOD_PUT, url, - requestHeaders, buffer, offset, length); - op.execute(); + requestHeaders, + buffer, + reqParams.getoffset(), + reqParams.getLength(), + sasTokenForReuse); + successOp.hardSetResult(HttpURLConnection.HTTP_OK); + return successOp; + } + throw e; + } + return op; } - public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData, boolean isClose) + // For AppendBlob its possible that the append succeeded in the backend but the request failed. + // However a retry would fail with an InvalidQueryParameterValue + // (as the current offset would be unacceptable). 
+ // Hence, we pass/succeed the appendblob append call + // in case we are doing a retry after checking the length of the file + public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, + final long length) throws AzureBlobFileSystemException { + if ((op.isARetriedRequest()) + && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_BAD_REQUEST)) { + final AbfsRestOperation destStatusOp = getPathStatus(path, false); + if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) { + String fileLength = destStatusOp.getResult().getResponseHeader( + HttpHeaderConfigurations.CONTENT_LENGTH); + if (length <= Long.parseLong(fileLength)) { + LOG.debug("Returning success response from append blob idempotency code"); + return true; + } + } + } + return false; + } + + public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData, + boolean isClose, final String cachedSasToken) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); // JDK7 does not support PATCH, so to workaround the issue we will use @@ -361,7 +494,9 @@ public AbfsRestOperation flush(final String path, final long position, boolean r abfsUriQueryBuilder.addQuery(QUERY_PARAM_POSITION, Long.toString(position)); abfsUriQueryBuilder.addQuery(QUERY_PARAM_RETAIN_UNCOMMITTED_DATA, String.valueOf(retainUncommittedData)); abfsUriQueryBuilder.addQuery(QUERY_PARAM_CLOSE, String.valueOf(isClose)); - appendSASTokenToQuery(path, SASTokenProvider.APPEND_OPERATION, abfsUriQueryBuilder); + // AbfsInputStream/AbfsOutputStream reuse SAS tokens for better performance + String sasTokenForReuse = appendSASTokenToQuery(path, SASTokenProvider.WRITE_OPERATION, + abfsUriQueryBuilder, cachedSasToken); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -369,7 +504,7 @@ public AbfsRestOperation flush(final String path, final long position, boolean r this, HTTP_METHOD_PUT, url, - requestHeaders); + requestHeaders, sasTokenForReuse); op.execute(); return op; } @@ -386,6 +521,7 @@ public AbfsRestOperation setPathProperties(final String path, final String prope final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, SET_PROPERTIES_ACTION); + appendSASTokenToQuery(path, SASTokenProvider.SET_PROPERTIES_OPERATION, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -398,12 +534,20 @@ public AbfsRestOperation setPathProperties(final String path, final String prope return op; } - public AbfsRestOperation getPathStatus(final String path) throws AzureBlobFileSystemException { + public AbfsRestOperation getPathStatus(final String path, final boolean includeProperties) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); + String operation = SASTokenProvider.GET_PROPERTIES_OPERATION; + if (!includeProperties) { + // The default action (operation) is implicitly to get properties and this action requires read permission + // because it reads user defined properties. If the action is getStatus or getAclStatus, then + // only traversal (execute) permission is required. 
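Editorial note: appendSuccessCheckOp, added earlier in this hunk, converts a failed, retried append-blob append into a success when the file is already at least as long as the intended end offset, on the assumption that the first attempt landed. A compact, illustrative sketch of that decision:

import java.net.HttpURLConnection;

public final class AppendIdempotencyDemo {

  // A retried append that failed with HTTP 400 is considered already applied if the
  // reported file length covers position + length of the request.
  static boolean retriedAppendAlreadyApplied(boolean wasRetried, int statusCode,
      long intendedEndOffset, long reportedContentLength) {
    return wasRetried
        && statusCode == HttpURLConnection.HTTP_BAD_REQUEST
        && intendedEndOffset <= reportedContentLength;
  }

  public static void main(String[] args) {
    // first attempt wrote the data but the response was lost; the retry came back 400
    System.out.println(retriedAppendAlreadyApplied(true, 400, 8192, 8192));  // true
    System.out.println(retriedAppendAlreadyApplied(false, 400, 8192, 4096)); // false
  }
}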
+ abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.GET_STATUS); + operation = SASTokenProvider.GET_STATUS_OPERATION; + } abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_UPN, String.valueOf(abfsConfiguration.isUpnUsed())); - appendSASTokenToQuery(path, SASTokenProvider.GETFILESTATUS_OPERATION, abfsUriQueryBuilder); + appendSASTokenToQuery(path, operation, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -417,14 +561,16 @@ public AbfsRestOperation getPathStatus(final String path) throws AzureBlobFileSy } public AbfsRestOperation read(final String path, final long position, final byte[] buffer, final int bufferOffset, - final int bufferLength, final String eTag) throws AzureBlobFileSystemException { + final int bufferLength, final String eTag, String cachedSasToken) throws AzureBlobFileSystemException { final List requestHeaders = createDefaultHeaders(); requestHeaders.add(new AbfsHttpHeader(RANGE, String.format("bytes=%d-%d", position, position + bufferLength - 1))); requestHeaders.add(new AbfsHttpHeader(IF_MATCH, eTag)); final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); - appendSASTokenToQuery(path, SASTokenProvider.READ_OPERATION, abfsUriQueryBuilder); + // AbfsInputStream/AbfsOutputStream reuse SAS tokens for better performance + String sasTokenForReuse = appendSASTokenToQuery(path, SASTokenProvider.READ_OPERATION, + abfsUriQueryBuilder, cachedSasToken); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); @@ -436,7 +582,7 @@ public AbfsRestOperation read(final String path, final long position, final byte requestHeaders, buffer, bufferOffset, - bufferLength); + bufferLength, sasTokenForReuse); op.execute(); return op; @@ -449,7 +595,8 @@ public AbfsRestOperation deletePath(final String path, final boolean recursive, final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(QUERY_PARAM_RECURSIVE, String.valueOf(recursive)); abfsUriQueryBuilder.addQuery(QUERY_PARAM_CONTINUATION, continuation); - appendSASTokenToQuery(path, SASTokenProvider.DELETE_OPERATION, abfsUriQueryBuilder); + String operation = recursive ? SASTokenProvider.DELETE_RECURSIVE_OPERATION : SASTokenProvider.DELETE_OPERATION; + appendSASTokenToQuery(path, operation, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -458,7 +605,55 @@ public AbfsRestOperation deletePath(final String path, final boolean recursive, HTTP_METHOD_DELETE, url, requestHeaders); + try { op.execute(); + } catch (AzureBlobFileSystemException e) { + final AbfsRestOperation idempotencyOp = deleteIdempotencyCheckOp(op); + if (idempotencyOp.getResult().getStatusCode() + == op.getResult().getStatusCode()) { + // idempotency did not return different result + // throw back the exception + throw e; + } else { + return idempotencyOp; + } + } + + return op; + } + + /** + * Check if the delete request failure is post a retry and if delete failure + * qualifies to be a success response assuming idempotency. + * + * There are below scenarios where delete could be incorrectly deducted as + * success post request retry: + * 1. Target was originally not existing and initial delete request had to be + * re-tried. + * 2. Parallel delete issued from any other store interface rather than + * delete issued from this filesystem instance. 
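Editorial note: the delete idempotency rule this Javadoc is describing boils down to the following check, shown here as a simplified, standalone sketch rather than the ABFS class itself.

import java.net.HttpURLConnection;

public final class DeleteIdempotencyDemo {

  // A retried DELETE that comes back 404 is treated as success when deletes are
  // configured to be considered idempotent: the earlier attempt most likely removed the path.
  static boolean treatAsSuccess(boolean wasRetried, int statusCode, boolean deleteConsideredIdempotent) {
    return wasRetried
        && statusCode == HttpURLConnection.HTTP_NOT_FOUND
        && deleteConsideredIdempotent;
  }

  public static void main(String[] args) {
    System.out.println(treatAsSuccess(true, 404, true));  // true: report the delete as done
    System.out.println(treatAsSuccess(false, 404, true)); // false: first attempt, surface the 404
  }
}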
+ * These are few corner cases and usually returning a success at this stage + * should help the job to continue. + * @param op Delete request REST operation response + * @return REST operation response post idempotency check + */ + public AbfsRestOperation deleteIdempotencyCheckOp(final AbfsRestOperation op) { + if ((op.isARetriedRequest()) + && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) + && DEFAULT_DELETE_CONSIDERED_IDEMPOTENT) { + // Server has returned HTTP 404, which means path no longer + // exists. Assuming delete result to be idempotent, return success. + final AbfsRestOperation successOp = new AbfsRestOperation( + AbfsRestOperationType.DeletePath, + this, + HTTP_METHOD_DELETE, + op.getUrl(), + op.getRequestHeaders()); + successOp.hardSetResult(HttpURLConnection.HTTP_OK); + LOG.debug("Returning success response from delete idempotency logic"); + return successOp; + } + return op; } @@ -479,7 +674,7 @@ public AbfsRestOperation setOwner(final String path, final String owner, final S final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.SET_ACCESS_CONTROL); - appendSASTokenToQuery(path, SASTokenProvider.SETOWNER_OPERATION, abfsUriQueryBuilder); + appendSASTokenToQuery(path, SASTokenProvider.SET_OWNER_OPERATION, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -504,7 +699,7 @@ public AbfsRestOperation setPermission(final String path, final String permissio final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.SET_ACCESS_CONTROL); - appendSASTokenToQuery(path, SASTokenProvider.SETPERMISSION_OPERATION, abfsUriQueryBuilder); + appendSASTokenToQuery(path, SASTokenProvider.SET_PERMISSION_OPERATION, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -537,7 +732,7 @@ public AbfsRestOperation setAcl(final String path, final String aclSpecString, f final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.SET_ACCESS_CONTROL); - appendSASTokenToQuery(path, SASTokenProvider.SETACL_OPERATION, abfsUriQueryBuilder); + appendSASTokenToQuery(path, SASTokenProvider.SET_ACL_OPERATION, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -560,7 +755,7 @@ public AbfsRestOperation getAclStatus(final String path, final boolean useUPN) t final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.GET_ACCESS_CONTROL); abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_UPN, String.valueOf(useUPN)); - appendSASTokenToQuery(path, SASTokenProvider.GETACL_OPERATION, abfsUriQueryBuilder); + appendSASTokenToQuery(path, SASTokenProvider.GET_ACL_OPERATION, abfsUriQueryBuilder); final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); final AbfsRestOperation op = new AbfsRestOperation( @@ -587,6 +782,7 @@ public AbfsRestOperation checkAccess(String path, String rwx) AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder(); 
abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, CHECK_ACCESS); abfsUriQueryBuilder.addQuery(QUERY_FS_ACTION, rwx); + appendSASTokenToQuery(path, SASTokenProvider.CHECK_ACCESS_OPERATION, abfsUriQueryBuilder); URL url = createRequestUrl(path, abfsUriQueryBuilder.toString()); AbfsRestOperation op = new AbfsRestOperation( AbfsRestOperationType.CheckAccess, this, @@ -595,23 +791,65 @@ public AbfsRestOperation checkAccess(String path, String rwx) return op; } + /** + * Get the directory query parameter used by the List Paths REST API and used + * as the path in the continuation token. If the input path is null or the + * root path "/", empty string is returned. If the input path begins with '/', + * the return value is the substring beginning at offset 1. Otherwise, the + * input path is returned. + * @param path the path to be listed. + * @return the value of the directory query parameter + */ + public static String getDirectoryQueryParameter(final String path) { + String directory = path; + if (Strings.isNullOrEmpty(directory)) { + directory = AbfsHttpConstants.EMPTY_STRING; + } else if (directory.charAt(0) == '/') { + directory = directory.substring(1); + } + return directory; + } + /** * If configured for SAS AuthType, appends SAS token to queryBuilder * @param path * @param operation * @param queryBuilder + * @return sasToken - returned for optional re-use. * @throws SASTokenProviderException */ - private void appendSASTokenToQuery(String path, String operation, AbfsUriQueryBuilder queryBuilder) throws SASTokenProviderException { + private String appendSASTokenToQuery(String path, String operation, AbfsUriQueryBuilder queryBuilder) throws SASTokenProviderException { + return appendSASTokenToQuery(path, operation, queryBuilder, null); + } + + /** + * If configured for SAS AuthType, appends SAS token to queryBuilder + * @param path + * @param operation + * @param queryBuilder + * @param cachedSasToken - previously acquired SAS token to be reused. + * @return sasToken - returned for optional re-use. 
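Editorial note: the getDirectoryQueryParameter helper added just above normalises the listing path: null or empty becomes the empty string and a single leading slash is stripped. A standalone sketch of that normalisation with a few sample inputs:

public final class DirectoryParamDemo {

  static String directoryQueryParameter(String path) {
    if (path == null || path.isEmpty()) {
      return "";
    }
    if (path.charAt(0) == '/') {
      return path.substring(1); // "/dir/file" -> "dir/file", "/" -> ""
    }
    return path;
  }

  public static void main(String[] args) {
    System.out.println(directoryQueryParameter(null));          // ""
    System.out.println(directoryQueryParameter("/"));           // ""
    System.out.println(directoryQueryParameter("/data/logs"));  // "data/logs"
    System.out.println(directoryQueryParameter("data/logs"));   // "data/logs"
  }
}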
+ * @throws SASTokenProviderException + */ + private String appendSASTokenToQuery(String path, + String operation, + AbfsUriQueryBuilder queryBuilder, + String cachedSasToken) + throws SASTokenProviderException { + String sasToken = null; if (this.authType == AuthType.SAS) { try { LOG.trace("Fetch SAS token for {} on {}", operation, path); - String sasToken = sasTokenProvider.getSASToken(this.accountName, - this.filesystem, path, operation); - if ((sasToken == null) || sasToken.isEmpty()) { - throw new UnsupportedOperationException("SASToken received is empty or null"); + if (cachedSasToken == null) { + sasToken = sasTokenProvider.getSASToken(this.accountName, + this.filesystem, path, operation); + if ((sasToken == null) || sasToken.isEmpty()) { + throw new UnsupportedOperationException("SASToken received is empty or null"); + } + } else { + sasToken = cachedSasToken; + LOG.trace("Using cached SAS token."); } - queryBuilder.setSASToken(sasToken); LOG.trace("SAS token fetch complete for {} on {}", operation, path); } catch (Exception ex) { @@ -621,13 +859,15 @@ private void appendSASTokenToQuery(String path, String operation, AbfsUriQueryBu ex.toString())); } } + return sasToken; } private URL createRequestUrl(final String query) throws AzureBlobFileSystemException { return createRequestUrl(EMPTY_STRING, query); } - private URL createRequestUrl(final String path, final String query) + @VisibleForTesting + protected URL createRequestUrl(final String path, final String query) throws AzureBlobFileSystemException { final String base = baseUrl.toString(); String encodedPath = path; @@ -679,32 +919,60 @@ public AuthType getAuthType() { @VisibleForTesting String initializeUserAgent(final AbfsConfiguration abfsConfiguration, - final String sslProviderName) { + final String sslProviderName) { + StringBuilder sb = new StringBuilder(); - sb.append("(JavaJRE "); + + sb.append(APN_VERSION); + sb.append(SINGLE_WHITE_SPACE); + sb.append(CLIENT_VERSION); + sb.append(SINGLE_WHITE_SPACE); + + sb.append("("); + + sb.append(System.getProperty(JAVA_VENDOR) + .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)); + sb.append(SINGLE_WHITE_SPACE); + sb.append("JavaJRE"); + sb.append(SINGLE_WHITE_SPACE); sb.append(System.getProperty(JAVA_VERSION)); - sb.append("; "); - sb.append( - System.getProperty(OS_NAME).replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)); - sb.append(" "); + sb.append(SEMICOLON); + sb.append(SINGLE_WHITE_SPACE); + + sb.append(System.getProperty(OS_NAME) + .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)); + sb.append(SINGLE_WHITE_SPACE); sb.append(System.getProperty(OS_VERSION)); - if (sslProviderName != null && !sslProviderName.isEmpty()) { - sb.append("; "); - sb.append(sslProviderName); - } - String tokenProviderField = - ExtensionHelper.getUserAgentSuffix(tokenProvider, ""); - if (!tokenProviderField.isEmpty()) { - sb.append("; ").append(tokenProviderField); - } + sb.append(FORWARD_SLASH); + sb.append(System.getProperty(OS_ARCH)); + sb.append(SEMICOLON); + + appendIfNotEmpty(sb, sslProviderName, true); + appendIfNotEmpty(sb, + ExtensionHelper.getUserAgentSuffix(tokenProvider, EMPTY_STRING), true); + + sb.append(SINGLE_WHITE_SPACE); + sb.append(abfsConfiguration.getClusterName()); + sb.append(FORWARD_SLASH); + sb.append(abfsConfiguration.getClusterType()); + sb.append(")"); - final String userAgentComment = sb.toString(); - String customUserAgentId = abfsConfiguration.getCustomUserAgentPrefix(); - if (customUserAgentId != null && !customUserAgentId.isEmpty()) { - return String.format(Locale.ROOT, 
CLIENT_VERSION + " %s %s", - userAgentComment, customUserAgentId); + + appendIfNotEmpty(sb, abfsConfiguration.getCustomUserAgentPrefix(), false); + + return String.format(Locale.ROOT, sb.toString()); + } + + private void appendIfNotEmpty(StringBuilder sb, String regEx, + boolean shouldAppendSemiColon) { + if (regEx == null || regEx.trim().isEmpty()) { + return; + } + sb.append(SINGLE_WHITE_SPACE); + sb.append(regEx); + if (shouldAppendSemiColon) { + sb.append(SEMICOLON); } - return String.format(Locale.ROOT, CLIENT_VERSION + " %s", userAgentComment); } @VisibleForTesting @@ -716,4 +984,12 @@ URL getBaseUrl() { public SASTokenProvider getSasTokenProvider() { return this.sasTokenProvider; } + + /** + * Getter for abfsCounters from AbfsClient. + * @return AbfsCounters instance. + */ + protected AbfsCounters getAbfsCounters() { + return abfsCounters; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientContext.java new file mode 100644 index 0000000000000..ad20550af7c3f --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientContext.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +/** + * Class to hold extra configurations for AbfsClient and further classes + * inside AbfsClient. + */ +public class AbfsClientContext { + + private final ExponentialRetryPolicy exponentialRetryPolicy; + private final AbfsPerfTracker abfsPerfTracker; + private final AbfsCounters abfsCounters; + + AbfsClientContext( + ExponentialRetryPolicy exponentialRetryPolicy, + AbfsPerfTracker abfsPerfTracker, + AbfsCounters abfsCounters) { + this.exponentialRetryPolicy = exponentialRetryPolicy; + this.abfsPerfTracker = abfsPerfTracker; + this.abfsCounters = abfsCounters; + } + + public ExponentialRetryPolicy getExponentialRetryPolicy() { + return exponentialRetryPolicy; + } + + public AbfsPerfTracker getAbfsPerfTracker() { + return abfsPerfTracker; + } + + public AbfsCounters getAbfsCounters() { + return abfsCounters; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientContextBuilder.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientContextBuilder.java new file mode 100644 index 0000000000000..00513f7138d53 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientContextBuilder.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +/** + * A builder for AbfsClientContext class with different options to select and + * build from. + */ +public class AbfsClientContextBuilder { + + private ExponentialRetryPolicy exponentialRetryPolicy; + private AbfsPerfTracker abfsPerfTracker; + private AbfsCounters abfsCounters; + + public AbfsClientContextBuilder withExponentialRetryPolicy( + final ExponentialRetryPolicy exponentialRetryPolicy) { + this.exponentialRetryPolicy = exponentialRetryPolicy; + return this; + } + + public AbfsClientContextBuilder withAbfsPerfTracker( + final AbfsPerfTracker abfsPerfTracker) { + this.abfsPerfTracker = abfsPerfTracker; + return this; + } + + public AbfsClientContextBuilder withAbfsCounters(final AbfsCounters abfsCounters) { + this.abfsCounters = abfsCounters; + return this; + } + + /** + * Build the context and get the instance with the properties selected. + * + * @return an instance of AbfsClientContext. + */ + public AbfsClientContext build() { + //validate the values + return new AbfsClientContext(exponentialRetryPolicy, abfsPerfTracker, + abfsCounters); + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java index f1e5aaae6835c..a55c924dd8152 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingAnalyzer.java @@ -24,8 +24,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,16 +114,19 @@ public void addBytesTransferred(long count, boolean isFailedOperation) { /** * Suspends the current storage operation, as necessary, to reduce throughput. + * @return true if Thread sleeps(Throttling occurs) else false. 
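Editorial note: the @return just documented reflects the contract change: suspendIfNecessary now reports whether it actually slept, so callers such as the throttling intercept can bump a throttle counter. A small sketch of that pattern; the counter name is illustrative, and the real analyzer derives sleepDuration from observed error rates rather than taking it as a parameter.

import java.util.concurrent.atomic.AtomicLong;

public final class ThrottleDemo {

  private static final AtomicLong READ_THROTTLE_COUNT = new AtomicLong();

  // Sleeps when a positive duration is requested and reports whether throttling happened.
  static boolean suspendIfNecessary(int sleepDurationMillis) {
    if (sleepDurationMillis > 0) {
      try {
        Thread.sleep(sleepDurationMillis);
        return true; // we throttled this request
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
    return false;
  }

  public static void main(String[] args) {
    if (suspendIfNecessary(10)) {
      READ_THROTTLE_COUNT.incrementAndGet(); // mirrors incrementing a READ_THROTTLES statistic
    }
    System.out.println("read throttles = " + READ_THROTTLE_COUNT.get());
  }
}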
*/ - public void suspendIfNecessary() { + public boolean suspendIfNecessary() { int duration = sleepDuration; if (duration > 0) { try { Thread.sleep(duration); + return true; } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } + return false; } @VisibleForTesting @@ -269,4 +272,4 @@ static class AbfsOperationMetrics { this.operationsSuccessful = new AtomicLong(); } } -} \ No newline at end of file +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java index 1c6ce17a38c3c..7303e833418db 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java @@ -23,6 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.AbfsStatistic; import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; /** @@ -103,17 +104,24 @@ static void updateMetrics(AbfsRestOperationType operationType, * uses this to suspend the request, if necessary, to minimize errors and * maximize throughput. */ - static void sendingRequest(AbfsRestOperationType operationType) { + static void sendingRequest(AbfsRestOperationType operationType, + AbfsCounters abfsCounters) { if (!isAutoThrottlingEnabled) { return; } switch (operationType) { case ReadFile: - singleton.readThrottler.suspendIfNecessary(); + if (singleton.readThrottler.suspendIfNecessary() + && abfsCounters != null) { + abfsCounters.incrementCounter(AbfsStatistic.READ_THROTTLES, 1); + } break; case Append: - singleton.writeThrottler.suspendIfNecessary(); + if (singleton.writeThrottler.suspendIfNecessary() + && abfsCounters != null) { + abfsCounters.incrementCounter(AbfsStatistic.WRITE_THROTTLES, 1); + } break; default: break; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java new file mode 100644 index 0000000000000..73996f5df8857 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsCounters.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.util.Map; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.azurebfs.AbfsStatistic; + +/** + * An interface for Abfs counters. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public interface AbfsCounters { + + /** + * Increment a AbfsStatistic by a long value. + * + * @param statistic AbfsStatistic to be incremented. + * @param value the value to increment the statistic by. + */ + void incrementCounter(AbfsStatistic statistic, long value); + + /** + * Form a String of the all the statistics and present in an organized manner. + * + * @param prefix the prefix to be set. + * @param separator the separator between the statistic name and value. + * @param suffix the suffix to be used. + * @param all enable all the statistics to be displayed or not. + * @return String of all the statistics and their values. + */ + String formString(String prefix, String separator, String suffix, + boolean all); + + /** + * Convert all the statistics into a key-value pair map to be used for + * testing. + * + * @return map with statistic name as key and statistic value as the map + * value. + */ + @VisibleForTesting + Map toMap(); + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java index 881d41f65f27d..720b99b9f8390 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java @@ -51,6 +51,8 @@ public class AbfsHttpOperation implements AbfsPerfLoggable { private static final Logger LOG = LoggerFactory.getLogger(AbfsHttpOperation.class); + public static final String SIGNATURE_QUERY_PARAM_KEY = "sig="; + private static final int CONNECT_TIMEOUT = 30 * 1000; private static final int READ_TIMEOUT = 30 * 1000; @@ -61,6 +63,8 @@ public class AbfsHttpOperation implements AbfsPerfLoggable { private final String method; private final URL url; + private String maskedUrl; + private String maskedEncodedUrl; private HttpURLConnection connection; private int statusCode; @@ -69,6 +73,7 @@ public class AbfsHttpOperation implements AbfsPerfLoggable { private String storageErrorMessage = ""; private String clientRequestId = ""; private String requestId = ""; + private String expectedAppendPos = ""; private ListResultSchema listResultSchema = null; // metrics @@ -81,6 +86,30 @@ public class AbfsHttpOperation implements AbfsPerfLoggable { private long sendRequestTimeMs; private long recvResponseTimeMs; + public static AbfsHttpOperation getAbfsHttpOperationWithFixedResult( + final URL url, + final String method, + final int httpStatus) { + AbfsHttpOperationWithFixedResult httpOp + = new AbfsHttpOperationWithFixedResult(url, method, httpStatus); + return httpOp; + } + + /** + * Constructor for FixedResult instance, avoiding connection init. 
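Editorial note: earlier in this hunk the new AbfsCounters interface defines incrementCounter and formString. The sketch below shows a trivial way such an interface could be backed by a concurrent map of atomics; it is illustrative only, and the names and behaviour of the real hadoop-azure implementation are not reproduced here.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public final class SimpleCounters {

  private final Map<String, AtomicLong> counters = new ConcurrentHashMap<>();

  public void incrementCounter(String statistic, long value) {
    counters.computeIfAbsent(statistic, k -> new AtomicLong()).addAndGet(value);
  }

  // Renders "prefix name separator value suffix ..." for each counter; the 'all' flag is ignored here.
  public String formString(String prefix, String separator, String suffix, boolean all) {
    StringBuilder sb = new StringBuilder(prefix);
    counters.forEach((name, v) -> sb.append(name).append(separator).append(v.get()).append(suffix));
    return sb.toString();
  }

  public static void main(String[] args) {
    SimpleCounters c = new SimpleCounters();
    c.incrementCounter("read_throttles", 1);
    c.incrementCounter("write_throttles", 2);
    System.out.println(c.formString("(", "=", "; ", true));
  }
}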
+ * @param url request url + * @param method Http method + * @param httpStatus HttpStatus + */ + protected AbfsHttpOperation(final URL url, + final String method, + final int httpStatus) { + this.isTraceEnabled = LOG.isTraceEnabled(); + this.url = url; + this.method = method; + this.statusCode = httpStatus; + } + protected HttpURLConnection getConnection() { return connection; } @@ -89,8 +118,8 @@ public String getMethod() { return method; } - public URL getUrl() { - return url; + public String getHost() { + return url.getHost(); } public int getStatusCode() { @@ -113,6 +142,10 @@ public String getClientRequestId() { return clientRequestId; } + public String getExpectedAppendPos() { + return expectedAppendPos; + } + public String getRequestId() { return requestId; } @@ -136,11 +169,12 @@ public String getResponseHeader(String httpHeader) { // Returns a trace message for the request @Override public String toString() { - final String urlStr = url.toString(); final StringBuilder sb = new StringBuilder(); sb.append(statusCode); sb.append(","); sb.append(storageErrorCode); + sb.append(","); + sb.append(expectedAppendPos); sb.append(",cid="); sb.append(clientRequestId); sb.append(",rid="); @@ -160,19 +194,12 @@ public String toString() { sb.append(","); sb.append(method); sb.append(","); - sb.append(urlStr); + sb.append(getSignatureMaskedUrl()); return sb.toString(); } // Returns a trace message for the ABFS API logging service to consume public String getLogString() { - String urlStr = null; - - try { - urlStr = URLEncoder.encode(url.toString(), "UTF-8"); - } catch(UnsupportedEncodingException e) { - urlStr = "https%3A%2F%2Ffailed%2Fto%2Fencode%2Furl"; - } final StringBuilder sb = new StringBuilder(); sb.append("s=") @@ -200,7 +227,7 @@ public String getLogString() { .append(" m=") .append(method) .append(" u=") - .append(urlStr); + .append(getSignatureMaskedEncodedUrl()); return sb.toString(); } @@ -436,6 +463,9 @@ private void processStorageErrorResponse() { case "message": storageErrorMessage = fieldValue; break; + case "ExpectedAppendPos": + expectedAppendPos = fieldValue; + break; default: break; } @@ -490,4 +520,62 @@ private void parseListFilesResponse(final InputStream stream) throws IOException private boolean isNullInputStream(InputStream stream) { return stream == null ? true : false; } + + public static String getSignatureMaskedUrl(String url) { + int qpStrIdx = url.indexOf('?' + SIGNATURE_QUERY_PARAM_KEY); + if (qpStrIdx == -1) { + qpStrIdx = url.indexOf('&' + SIGNATURE_QUERY_PARAM_KEY); + } + if (qpStrIdx == -1) { + return url; + } + final int sigStartIdx = qpStrIdx + SIGNATURE_QUERY_PARAM_KEY.length() + 1; + final int ampIdx = url.indexOf("&", sigStartIdx); + final int sigEndIndex = (ampIdx != -1) ? 
ampIdx : url.length(); + String signature = url.substring(sigStartIdx, sigEndIndex); + return url.replace(signature, "XXXX"); + } + + public static String encodedUrlStr(String url) { + try { + return URLEncoder.encode(url, "UTF-8"); + } catch (UnsupportedEncodingException e) { + return "https%3A%2F%2Ffailed%2Fto%2Fencode%2Furl"; + } + } + + public String getSignatureMaskedUrl() { + if (this.maskedUrl == null) { + this.maskedUrl = getSignatureMaskedUrl(this.url.toString()); + } + return this.maskedUrl; + } + + public String getSignatureMaskedEncodedUrl() { + if (this.maskedEncodedUrl == null) { + this.maskedEncodedUrl = encodedUrlStr(getSignatureMaskedUrl()); + } + return this.maskedEncodedUrl; + } + + public static class AbfsHttpOperationWithFixedResult extends AbfsHttpOperation { + /** + * Creates an instance to represent fixed results. + * This is used in idempotency handling. + * + * @param url The full URL including query string parameters. + * @param method The HTTP method (PUT, PATCH, POST, GET, HEAD, or DELETE). + * @param httpStatus StatusCode to hard set + */ + public AbfsHttpOperationWithFixedResult(final URL url, + final String method, + final int httpStatus) { + super(url, method, httpStatus); + } + + @Override + public String getResponseHeader(final String httpHeader) { + return ""; + } + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java index 8dc3b8f07079d..0dd3dcf065fbe 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java @@ -23,7 +23,11 @@ import java.io.IOException; import java.net.HttpURLConnection; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.FSExceptionMessages; @@ -32,15 +36,30 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.fs.azurebfs.utils.CachedSASToken; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +import static java.lang.Math.max; +import static java.lang.Math.min; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_KB; import static org.apache.hadoop.util.StringUtils.toLowerCase; /** * The AbfsInputStream for AbfsClient. 
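To make the masking above concrete, a small hypothetical example (the account, path and sig value are invented; only the shape of the output matters):

  import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;

  class UrlMaskingSketch {
    public static void main(String[] args) {
      String url = "https://acct.dfs.core.windows.net/fs/dir/file"
          + "?sv=2020-02-10&se=2021-01-01&sig=dGhpc0lzQVNhbXBsZQ";
      // Only the signature value is redacted: ...&sig=XXXX
      System.out.println(AbfsHttpOperation.getSignatureMaskedUrl(url));
      // URL-encoded form of the masked URL, as used by getLogString()
      System.out.println(AbfsHttpOperation.encodedUrlStr(
          AbfsHttpOperation.getSignatureMaskedUrl(url)));
    }
  }
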
*/ public class AbfsInputStream extends FSInputStream implements CanUnbuffer, - StreamCapabilities { + StreamCapabilities, IOStatisticsSource { + private static final Logger LOG = LoggerFactory.getLogger(AbfsInputStream.class); + // Footer size is set to qualify for both ORC and parquet files + public static final int FOOTER_SIZE = 16 * ONE_KB; + public static final int MAX_OPTIMIZED_READ_ATTEMPTS = 2; + private int readAheadBlockSize; private final AbfsClient client; private final Statistics statistics; private final String path; @@ -50,7 +69,19 @@ public class AbfsInputStream extends FSInputStream implements CanUnbuffer, private final String eTag; // eTag of the path when InputStream are created private final boolean tolerateOobAppends; // whether tolerate Oob Appends private final boolean readAheadEnabled; // whether enable readAhead; + private final boolean alwaysReadBufferSize; + /* + * By default the pread API will do a seek + read as in FSInputStream. + * The read data will be kept in a buffer. When bufferedPreadDisabled is true, + * the pread API will read only the specified amount of data from the given + * offset and the buffer will not come into use at all. + * @see #read(long, byte[], int, int) + */ + private final boolean bufferedPreadDisabled; + private boolean firstRead = true; + // SAS tokens can be re-used until they expire + private CachedSASToken cachedSasToken; private byte[] buffer = null; // will be initialized on first use private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server @@ -60,30 +91,95 @@ public class AbfsInputStream extends FSInputStream implements CanUnbuffer, // of valid bytes in buffer) private boolean closed = false; + // Optimisations modify the pointer fields. + // For better resilience the following fields are used to save the + // existing state before optimization flows. + private int limitBkp; + private int bCursorBkp; + private long fCursorBkp; + private long fCursorAfterLastReadBkp; + + /** Stream statistics. */ + private final AbfsInputStreamStatistics streamStatistics; + private long bytesFromReadAhead; // bytes read from readAhead; for testing + private long bytesFromRemoteRead; // bytes read remotely; for testing + + private final AbfsInputStreamContext context; + private IOStatistics ioStatistics; + public AbfsInputStream( - final AbfsClient client, - final Statistics statistics, - final String path, - final long contentLength, - final int bufferSize, - final int readAheadQueueDepth, - final boolean tolerateOobAppends, - final String eTag) { + final AbfsClient client, + final Statistics statistics, + final String path, + final long contentLength, + final AbfsInputStreamContext abfsInputStreamContext, + final String eTag) { this.client = client; this.statistics = statistics; this.path = path; this.contentLength = contentLength; - this.bufferSize = bufferSize; - this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? 
readAheadQueueDepth : Runtime.getRuntime().availableProcessors(); - this.tolerateOobAppends = tolerateOobAppends; + this.bufferSize = abfsInputStreamContext.getReadBufferSize(); + this.readAheadQueueDepth = abfsInputStreamContext.getReadAheadQueueDepth(); + this.tolerateOobAppends = abfsInputStreamContext.isTolerateOobAppends(); this.eTag = eTag; this.readAheadEnabled = true; + this.alwaysReadBufferSize + = abfsInputStreamContext.shouldReadBufferSizeAlways(); + this.bufferedPreadDisabled = abfsInputStreamContext + .isBufferedPreadDisabled(); + this.cachedSasToken = new CachedSASToken( + abfsInputStreamContext.getSasTokenRenewPeriodForStreamsInSeconds()); + this.streamStatistics = abfsInputStreamContext.getStreamStatistics(); + this.context = abfsInputStreamContext; + readAheadBlockSize = abfsInputStreamContext.getReadAheadBlockSize(); + + // Propagate the config values to ReadBufferManager so that the first instance + // to initialize can set the readAheadBlockSize + ReadBufferManager.setReadBufferManagerConfigs(readAheadBlockSize); + if (streamStatistics != null) { + ioStatistics = streamStatistics.getIOStatistics(); + } } public String getPath() { return path; } + @Override + public int read(long position, byte[] buffer, int offset, int length) + throws IOException { + // When bufferedPreadDisabled = true, this API does not use any shared buffer, + // cursor position etc. So this is implemented as NOT synchronized. HBase + // kind of random reads on a shared file input stream will greatly get + // benefited by such implementation. + // Strict close check at the begin of the API only not for the entire flow. + synchronized (this) { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + } + LOG.debug("pread requested offset = {} len = {} bufferedPreadDisabled = {}", + offset, length, bufferedPreadDisabled); + if (!bufferedPreadDisabled) { + return super.read(position, buffer, offset, length); + } + validatePositionedReadArgs(position, buffer, offset, length); + if (length == 0) { + return 0; + } + if (streamStatistics != null) { + streamStatistics.readOperationStarted(); + } + int bytesRead = readRemote(position, buffer, offset, length); + if (statistics != null) { + statistics.incrementBytesRead(bytesRead); + } + if (streamStatistics != null) { + streamStatistics.bytesRead(bytesRead); + } + return bytesRead; + } + @Override public int read() throws IOException { byte[] b = new byte[1]; @@ -97,12 +193,30 @@ public int read() throws IOException { @Override public synchronized int read(final byte[] b, final int off, final int len) throws IOException { + // check if buffer is null before logging the length + if (b != null) { + LOG.debug("read requested b.length = {} offset = {} len = {}", b.length, + off, len); + } else { + LOG.debug("read requested b = null offset = {} len = {}", off, len); + } + int currentOff = off; int currentLen = len; int lastReadBytes; int totalReadBytes = 0; + if (streamStatistics != null) { + streamStatistics.readOperationStarted(); + } + incrementReadOps(); do { - lastReadBytes = readOneBlock(b, currentOff, currentLen); + if (shouldReadFully()) { + lastReadBytes = readFileCompletely(b, currentOff, currentLen); + } else if (shouldReadLastBlock()) { + lastReadBytes = readLastBlock(b, currentOff, currentLen); + } else { + lastReadBytes = readOneBlock(b, currentOff, currentLen); + } if (lastReadBytes > 0) { currentOff += lastReadBytes; currentLen -= lastReadBytes; @@ -115,25 +229,24 @@ public synchronized int read(final byte[] b, final int 
off, final int len) throw return totalReadBytes > 0 ? totalReadBytes : lastReadBytes; } - private int readOneBlock(final byte[] b, final int off, final int len) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } + private boolean shouldReadFully() { + return this.firstRead && this.context.readSmallFilesCompletely() + && this.contentLength <= this.bufferSize; + } - Preconditions.checkNotNull(b); + private boolean shouldReadLastBlock() { + long footerStart = max(0, this.contentLength - FOOTER_SIZE); + return this.firstRead && this.context.optimizeFooterRead() + && this.fCursor >= footerStart; + } + private int readOneBlock(final byte[] b, final int off, final int len) throws IOException { if (len == 0) { return 0; } - - if (this.available() == 0) { + if (!validate(b, off, len)) { return -1; } - - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - //If buffer is empty, then fill the buffer. if (bCursor == limit) { //If EOF, then return -1 @@ -146,14 +259,22 @@ private int readOneBlock(final byte[] b, final int off, final int len) throws IO bCursor = 0; limit = 0; if (buffer == null) { + LOG.debug("created new buffer size {}", bufferSize); buffer = new byte[bufferSize]; } - // Enable readAhead when reading sequentially - if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) { + if (alwaysReadBufferSize) { bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false); } else { - bytesRead = readInternal(fCursor, buffer, 0, b.length, true); + // Enable readAhead when reading sequentially + if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) { + bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false); + } else { + bytesRead = readInternal(fCursor, buffer, 0, b.length, true); + } + } + if (firstRead) { + firstRead = false; } if (bytesRead == -1) { @@ -164,20 +285,136 @@ private int readOneBlock(final byte[] b, final int off, final int len) throws IO fCursor += bytesRead; fCursorAfterLastRead = fCursor; } + return copyToUserBuffer(b, off, len); + } + + private int readFileCompletely(final byte[] b, final int off, final int len) + throws IOException { + if (len == 0) { + return 0; + } + if (!validate(b, off, len)) { + return -1; + } + savePointerState(); + // data need to be copied to user buffer from index bCursor, bCursor has + // to be the current fCusor + bCursor = (int) fCursor; + return optimisedRead(b, off, len, 0, contentLength); + } + + private int readLastBlock(final byte[] b, final int off, final int len) + throws IOException { + if (len == 0) { + return 0; + } + if (!validate(b, off, len)) { + return -1; + } + savePointerState(); + // data need to be copied to user buffer from index bCursor, + // AbfsInutStream buffer is going to contain data from last block start. 
In + // that case bCursor will be set to fCursor - lastBlockStart + long lastBlockStart = max(0, contentLength - bufferSize); + bCursor = (int) (fCursor - lastBlockStart); + // 0 if contentlength is < buffersize + long actualLenToRead = min(bufferSize, contentLength); + return optimisedRead(b, off, len, lastBlockStart, actualLenToRead); + } + + private int optimisedRead(final byte[] b, final int off, final int len, + final long readFrom, final long actualLen) throws IOException { + fCursor = readFrom; + int totalBytesRead = 0; + int lastBytesRead = 0; + try { + buffer = new byte[bufferSize]; + for (int i = 0; + i < MAX_OPTIMIZED_READ_ATTEMPTS && fCursor < contentLength; i++) { + lastBytesRead = readInternal(fCursor, buffer, limit, + (int) actualLen - limit, true); + if (lastBytesRead > 0) { + totalBytesRead += lastBytesRead; + limit += lastBytesRead; + fCursor += lastBytesRead; + fCursorAfterLastRead = fCursor; + } + } + } catch (IOException e) { + LOG.debug("Optimized read failed. Defaulting to readOneBlock {}", e); + restorePointerState(); + return readOneBlock(b, off, len); + } finally { + firstRead = false; + } + if (totalBytesRead < 1) { + restorePointerState(); + return -1; + } + // If the read was partial and the user requested part of data has + // not read then fallback to readoneblock. When limit is smaller than + // bCursor that means the user requested data has not been read. + if (fCursor < contentLength && bCursor > limit) { + restorePointerState(); + return readOneBlock(b, off, len); + } + return copyToUserBuffer(b, off, len); + } + + private void savePointerState() { + // Saving the current state for fall back ifn case optimization fails + this.limitBkp = this.limit; + this.fCursorBkp = this.fCursor; + this.fCursorAfterLastReadBkp = this.fCursorAfterLastRead; + this.bCursorBkp = this.bCursor; + } + + private void restorePointerState() { + // Saving the current state for fall back ifn case optimization fails + this.limit = this.limitBkp; + this.fCursor = this.fCursorBkp; + this.fCursorAfterLastRead = this.fCursorAfterLastReadBkp; + this.bCursor = this.bCursorBkp; + } + private boolean validate(final byte[] b, final int off, final int len) + throws IOException { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + + Preconditions.checkNotNull(b); + LOG.debug("read one block requested b.length = {} off {} len {}", b.length, + off, len); + + if (this.available() == 0) { + return false; + } + + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } + return true; + } + + private int copyToUserBuffer(byte[] b, int off, int len){ //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer) //(bytes returned may be less than requested) int bytesRemaining = limit - bCursor; - int bytesToRead = Math.min(len, bytesRemaining); + int bytesToRead = min(len, bytesRemaining); System.arraycopy(buffer, bCursor, b, off, bytesToRead); bCursor += bytesToRead; if (statistics != null) { statistics.incrementBytesRead(bytesToRead); } + if (streamStatistics != null) { + // Bytes read from the local buffer. 
+ streamStatistics.bytesReadFromBuffer(bytesToRead); + streamStatistics.bytesRead(bytesToRead); + } return bytesToRead; } - private int readInternal(final long position, final byte[] b, final int offset, final int length, final boolean bypassReadAhead) throws IOException { if (readAheadEnabled && !bypassReadAhead) { @@ -189,18 +426,30 @@ private int readInternal(final long position, final byte[] b, final int offset, // queue read-aheads int numReadAheads = this.readAheadQueueDepth; - long nextSize; long nextOffset = position; + // First read to queue needs to be of readBufferSize and later + // of readAhead Block size + long nextSize = min((long) bufferSize, contentLength - nextOffset); + LOG.debug("read ahead enabled issuing readheads num = {}", numReadAheads); while (numReadAheads > 0 && nextOffset < contentLength) { - nextSize = Math.min((long) bufferSize, contentLength - nextOffset); + LOG.debug("issuing read ahead requestedOffset = {} requested size {}", + nextOffset, nextSize); ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize); nextOffset = nextOffset + nextSize; numReadAheads--; + // From next round onwards should be of readahead block size. + nextSize = min((long) readAheadBlockSize, contentLength - nextOffset); } // try reading from buffers first receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b); + bytesFromReadAhead += receivedBytes; if (receivedBytes > 0) { + incrementReadOps(); + LOG.debug("Received data from read ahead, not doing remote read"); + if (streamStatistics != null) { + streamStatistics.readAheadBytesRead(receivedBytes); + } return receivedBytes; } @@ -208,6 +457,7 @@ private int readInternal(final long position, final byte[] b, final int offset, receivedBytes = readRemote(position, b, offset, length); return receivedBytes; } else { + LOG.debug("read ahead disabled, reading remote"); return readRemote(position, b, offset, length); } } @@ -234,8 +484,19 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti final AbfsRestOperation op; AbfsPerfTracker tracker = client.getAbfsPerfTracker(); try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "readRemote", "read")) { - op = client.read(path, position, b, offset, length, tolerateOobAppends ? "*" : eTag); + LOG.trace("Trigger client.read for path={} position={} offset={} length={}", path, position, offset, length); + op = IOStatisticsBinding.trackDuration((IOStatisticsStore) ioStatistics, + StoreStatisticNames.ACTION_HTTP_GET_REQUEST, + () -> client.read(path, position, b, offset, length, + tolerateOobAppends ? 
"*" : eTag, cachedSasToken.get())); + cachedSasToken.update(op.getSasToken()); + if (streamStatistics != null) { + streamStatistics.remoteReadOperation(); + } + LOG.debug("issuing HTTP GET request params position = {} b.length = {} " + + "offset = {} length = {}", position, b.length, offset, length); perfInfo.registerResult(op.getResult()).registerSuccess(true); + incrementReadOps(); } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { AbfsRestOperationException ere = (AbfsRestOperationException) ex; @@ -246,12 +507,26 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti throw new IOException(ex); } long bytesRead = op.getResult().getBytesReceived(); + if (streamStatistics != null) { + streamStatistics.remoteBytesRead(bytesRead); + } if (bytesRead > Integer.MAX_VALUE) { throw new IOException("Unexpected Content-Length"); } + LOG.debug("HTTP request read bytes = {}", bytesRead); + bytesFromRemoteRead += bytesRead; return (int) bytesRead; } + /** + * Increment Read Operations. + */ + private void incrementReadOps() { + if (statistics != null) { + statistics.incrementReadOps(1); + } + } + /** * Seek to given position in stream. * @param n position to seek to @@ -260,6 +535,7 @@ int readRemote(long position, byte[] b, int offset, int length) throws IOExcepti */ @Override public synchronized void seek(long n) throws IOException { + LOG.debug("requested seek to position {}", n); if (closed) { throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); } @@ -270,13 +546,21 @@ public synchronized void seek(long n) throws IOException { throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); } + if (streamStatistics != null) { + streamStatistics.seek(n, fCursor); + } + if (n>=fCursor-limit && n<=fCursor) { // within buffer bCursor = (int) (n-(fCursor-limit)); + if (streamStatistics != null) { + streamStatistics.seekInBuffer(); + } return; } // next read will read from here fCursor = n; + LOG.debug("set fCursor to {}", fCursor); //invalidate buffer limit = 0; @@ -368,6 +652,7 @@ public boolean seekToNewSource(long l) throws IOException { public synchronized void close() throws IOException { closed = true; buffer = null; // de-reference the buffer so it can be GC'ed sooner + LOG.debug("Closing {}", this); } /** @@ -415,4 +700,94 @@ public boolean hasCapability(String capability) { byte[] getBuffer() { return buffer; } + + @VisibleForTesting + protected void setCachedSasToken(final CachedSASToken cachedSasToken) { + this.cachedSasToken = cachedSasToken; + } + + /** + * Getter for AbfsInputStreamStatistics. + * + * @return an instance of AbfsInputStreamStatistics. + */ + @VisibleForTesting + public AbfsInputStreamStatistics getStreamStatistics() { + return streamStatistics; + } + + /** + * Getter for bytes read from readAhead buffer that fills asynchronously. + * + * @return value of the counter in long. + */ + @VisibleForTesting + public long getBytesFromReadAhead() { + return bytesFromReadAhead; + } + + /** + * Getter for bytes read remotely from the data store. + * + * @return value of the counter in long. 
+ */ + @VisibleForTesting + public long getBytesFromRemoteRead() { + return bytesFromRemoteRead; + } + + @VisibleForTesting + public int getBufferSize() { + return bufferSize; + } + + @VisibleForTesting + public int getReadAheadQueueDepth() { + return readAheadQueueDepth; + } + + @VisibleForTesting + public boolean shouldAlwaysReadBufferSize() { + return alwaysReadBufferSize; + } + + @Override + public IOStatistics getIOStatistics() { + return ioStatistics; + } + + /** + * Get the statistics of the stream. + * @return a string value. + */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(super.toString()); + if (streamStatistics != null) { + sb.append("AbfsInputStream@(").append(this.hashCode()).append("){"); + sb.append(streamStatistics.toString()); + sb.append("}"); + } + return sb.toString(); + } + + @VisibleForTesting + int getBCursor() { + return this.bCursor; + } + + @VisibleForTesting + long getFCursor() { + return this.fCursor; + } + + @VisibleForTesting + long getFCursorAfterLastRead() { + return this.fCursorAfterLastRead; + } + + @VisibleForTesting + long getLimit() { + return this.limit; + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java new file mode 100644 index 0000000000000..fe41f22a772ff --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamContext.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Class to hold extra input stream configs. 
+ */ +public class AbfsInputStreamContext extends AbfsStreamContext { + // Retaining logger of AbfsInputStream + private static final Logger LOG = LoggerFactory.getLogger(AbfsInputStream.class); + + private int readBufferSize; + + private int readAheadQueueDepth; + + private boolean tolerateOobAppends; + + private boolean alwaysReadBufferSize; + + private int readAheadBlockSize; + + private AbfsInputStreamStatistics streamStatistics; + + private boolean readSmallFilesCompletely; + + private boolean optimizeFooterRead; + + private boolean bufferedPreadDisabled; + + public AbfsInputStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) { + super(sasTokenRenewPeriodForStreamsInSeconds); + } + + public AbfsInputStreamContext withReadBufferSize(final int readBufferSize) { + this.readBufferSize = readBufferSize; + return this; + } + + public AbfsInputStreamContext withReadAheadQueueDepth( + final int readAheadQueueDepth) { + this.readAheadQueueDepth = (readAheadQueueDepth >= 0) + ? readAheadQueueDepth + : Runtime.getRuntime().availableProcessors(); + return this; + } + + public AbfsInputStreamContext withTolerateOobAppends( + final boolean tolerateOobAppends) { + this.tolerateOobAppends = tolerateOobAppends; + return this; + } + + public AbfsInputStreamContext withStreamStatistics( + final AbfsInputStreamStatistics streamStatistics) { + this.streamStatistics = streamStatistics; + return this; + } + + public AbfsInputStreamContext withReadSmallFilesCompletely( + final boolean readSmallFilesCompletely) { + this.readSmallFilesCompletely = readSmallFilesCompletely; + return this; + } + + public AbfsInputStreamContext withOptimizeFooterRead( + final boolean optimizeFooterRead) { + this.optimizeFooterRead = optimizeFooterRead; + return this; + } + + public AbfsInputStreamContext withShouldReadBufferSizeAlways( + final boolean alwaysReadBufferSize) { + this.alwaysReadBufferSize = alwaysReadBufferSize; + return this; + } + + public AbfsInputStreamContext withReadAheadBlockSize( + final int readAheadBlockSize) { + this.readAheadBlockSize = readAheadBlockSize; + return this; + } + + public AbfsInputStreamContext withBufferedPreadDisabled( + final boolean bufferedPreadDisabled) { + this.bufferedPreadDisabled = bufferedPreadDisabled; + return this; + } + + public AbfsInputStreamContext build() { + if (readBufferSize > readAheadBlockSize) { + LOG.debug( + "fs.azure.read.request.size[={}] is configured for higher size than " + + "fs.azure.read.readahead.blocksize[={}]. Auto-align " + + "readAhead block size to be same as readRequestSize.", + readBufferSize, readAheadBlockSize); + readAheadBlockSize = readBufferSize; + } + // Validation of parameters to be done here. 
+ return this; + } + + public int getReadBufferSize() { + return readBufferSize; + } + + public int getReadAheadQueueDepth() { + return readAheadQueueDepth; + } + + public boolean isTolerateOobAppends() { + return tolerateOobAppends; + } + + public AbfsInputStreamStatistics getStreamStatistics() { + return streamStatistics; + } + + public boolean readSmallFilesCompletely() { + return this.readSmallFilesCompletely; + } + + public boolean optimizeFooterRead() { + return this.optimizeFooterRead; + } + + public boolean shouldReadBufferSizeAlways() { + return alwaysReadBufferSize; + } + + public int getReadAheadBlockSize() { + return readAheadBlockSize; + } + + public boolean isBufferedPreadDisabled() { + return bufferedPreadDisabled; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java new file mode 100644 index 0000000000000..00663467fe233 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatistics.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +/** + * Interface for statistics for the AbfsInputStream. + */ +@InterfaceStability.Unstable +public interface AbfsInputStreamStatistics extends IOStatisticsSource { + /** + * Seek backwards, incrementing the seek and backward seek counters. + * + * @param negativeOffset how far was the seek? + * This is expected to be negative. + */ + void seekBackwards(long negativeOffset); + + /** + * Record a forward seek, adding a seek operation, a forward + * seek operation, and any bytes skipped. + * + * @param skipped number of bytes skipped by reading from the stream. + * If the seek was implemented by a close + reopen, set this to zero. + */ + void seekForwards(long skipped); + + /** + * Record a forward or backward seek, adding a seek operation, a forward or + * a backward seek operation, and number of bytes skipped. + * + * @param seekTo seek to the position. + * @param currentPos current position. + */ + void seek(long seekTo, long currentPos); + + /** + * Increment the bytes read counter by the number of bytes; + * no-op if the argument is negative. + * + * @param bytes number of bytes read. + */ + void bytesRead(long bytes); + + /** + * Record the total bytes read from buffer. + * + * @param bytes number of bytes that are read from buffer. 
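A usage sketch for the builder-style context above (the buffer sizes and SAS renew period are example values, not defaults; the statistics implementation is the one added further down in this patch):

  import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext;
  import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl;

  class InputStreamContextSketch {
    static AbfsInputStreamContext sampleContext() {
      return new AbfsInputStreamContext(120L)        // SAS token renew period, in seconds
          .withReadBufferSize(4 * 1024 * 1024)       // 4 MB read buffer
          .withReadAheadQueueDepth(-1)               // negative => availableProcessors()
          .withTolerateOobAppends(false)
          .withReadSmallFilesCompletely(true)
          .withOptimizeFooterRead(true)
          .withShouldReadBufferSizeAlways(false)
          .withReadAheadBlockSize(1024 * 1024)       // build() aligns this up to the buffer size
          .withStreamStatistics(new AbfsInputStreamStatisticsImpl())
          .build();
    }
  }
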
+ */ + void bytesReadFromBuffer(long bytes); + + /** + * Records the total number of seeks done in the buffer. + */ + void seekInBuffer(); + + /** + * A {@code read(byte[] buf, int off, int len)} operation has started. + */ + void readOperationStarted(); + + /** + * Records a successful remote read operation. + */ + void remoteReadOperation(); + + /** + * Records the bytes read from readAhead buffer. + * @param bytes the bytes to be incremented. + */ + void readAheadBytesRead(long bytes); + + /** + * Records bytes read remotely after nothing from readAheadBuffer was read. + * @param bytes the bytes to be incremented. + */ + void remoteBytesRead(long bytes); + + /** + * Get the IOStatisticsStore instance from AbfsInputStreamStatistics. + * @return instance of IOStatisticsStore which extends IOStatistics. + */ + IOStatistics getIOStatistics(); + + /** + * Makes the string of all the AbfsInputStream statistics. + * @return the string with all the statistics. + */ + @Override + String toString(); +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java new file mode 100644 index 0000000000000..bd09762976d7f --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStreamStatisticsImpl.java @@ -0,0 +1,277 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_HTTP_GET_REQUEST; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.SUFFIX_MEAN; +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * Stats for the AbfsInputStream. 
+ */ +public class AbfsInputStreamStatisticsImpl + implements AbfsInputStreamStatistics { + + private final IOStatisticsStore ioStatisticsStore = iostatisticsStore() + .withCounters( + StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS, + StreamStatisticNames.STREAM_READ_BYTES, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED, + StreamStatisticNames.STREAM_READ_OPERATIONS, + StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, + StreamStatisticNames.SEEK_IN_BUFFER, + StreamStatisticNames.BYTES_READ_BUFFER, + StreamStatisticNames.REMOTE_READ_OP, + StreamStatisticNames.READ_AHEAD_BYTES_READ, + StreamStatisticNames.REMOTE_BYTES_READ + ) + .withDurationTracking(ACTION_HTTP_GET_REQUEST) + .build(); + + /* Reference to the atomic counter for frequently updated counters to avoid + * cost of the map lookup on every increment. + */ + private final AtomicLong bytesRead = + ioStatisticsStore.getCounterReference(StreamStatisticNames.STREAM_READ_BYTES); + private final AtomicLong readOps = + ioStatisticsStore.getCounterReference(StreamStatisticNames.STREAM_READ_OPERATIONS); + private final AtomicLong seekOps = + ioStatisticsStore.getCounterReference(StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS); + + /** + * Seek backwards, incrementing the seek and backward seek counters. + * + * @param negativeOffset how far was the seek? + * This is expected to be negative. + */ + @Override + public void seekBackwards(long negativeOffset) { + seekOps.incrementAndGet(); + ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS); + ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, negativeOffset); + } + + /** + * Record a forward seek, adding a seek operation, a forward + * seek operation, and any bytes skipped. + * + * @param skipped number of bytes skipped by reading from the stream. + * If the seek was implemented by a close + reopen, set this to zero. + */ + @Override + public void seekForwards(long skipped) { + seekOps.incrementAndGet(); + ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS); + ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED, skipped); + } + + /** + * Record a forward or backward seek, adding a seek operation, a forward or + * a backward seek operation, and number of bytes skipped. + * The seek direction will be calculated based on the parameters. + * + * @param seekTo seek to the position. + * @param currentPos current position. + */ + @Override + public void seek(long seekTo, long currentPos) { + if (seekTo >= currentPos) { + this.seekForwards(seekTo - currentPos); + } else { + this.seekBackwards(currentPos - seekTo); + } + } + + /** + * Increment the bytes read counter by the number of bytes; + * no-op if the argument is negative. + * + * @param bytes number of bytes read. + */ + @Override + public void bytesRead(long bytes) { + bytesRead.addAndGet(bytes); + } + + /** + * {@inheritDoc} + * + * Total bytes read from the buffer. + * + * @param bytes number of bytes that are read from buffer. + */ + @Override + public void bytesReadFromBuffer(long bytes) { + ioStatisticsStore.incrementCounter(StreamStatisticNames.BYTES_READ_BUFFER, bytes); + } + + /** + * {@inheritDoc} + * + * Increment the number of seeks in the buffer. 
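Since the counters above live in an IOStatisticsStore, callers and tests can read them back through the generic IOStatistics API; a minimal sketch:

  import org.apache.hadoop.fs.statistics.IOStatistics;
  import org.apache.hadoop.fs.statistics.IOStatisticsSource;
  import org.apache.hadoop.fs.statistics.StreamStatisticNames;

  class StreamStatsSketch {
    // Works for any IOStatisticsSource, e.g. the AbfsInputStream in this patch.
    static Long bytesRead(IOStatisticsSource source) {
      IOStatistics stats = source.getIOStatistics();
      return stats == null
          ? null
          : stats.counters().get(StreamStatisticNames.STREAM_READ_BYTES);
    }
  }
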
+ */ + @Override + public void seekInBuffer() { + ioStatisticsStore.incrementCounter(StreamStatisticNames.SEEK_IN_BUFFER); + } + + /** + * A {@code read(byte[] buf, int off, int len)} operation has started. + */ + @Override + public void readOperationStarted() { + readOps.incrementAndGet(); + } + + /** + * Total bytes read from readAhead buffer during a read operation. + * + * @param bytes the bytes to be incremented. + */ + @Override + public void readAheadBytesRead(long bytes) { + ioStatisticsStore.incrementCounter(StreamStatisticNames.READ_AHEAD_BYTES_READ, bytes); + } + + /** + * Total bytes read remotely after nothing was read from readAhead buffer. + * + * @param bytes the bytes to be incremented. + */ + @Override + public void remoteBytesRead(long bytes) { + ioStatisticsStore.incrementCounter(StreamStatisticNames.REMOTE_BYTES_READ, bytes); + } + + /** + * {@inheritDoc} + * + * Increment the counter when a remote read operation occurs. + */ + @Override + public void remoteReadOperation() { + ioStatisticsStore.incrementCounter(StreamStatisticNames.REMOTE_READ_OP); + } + + /** + * Getter for IOStatistics instance used. + * @return IOStatisticsStore instance which extends IOStatistics. + */ + @Override + public IOStatistics getIOStatistics() { + return ioStatisticsStore; + } + + @VisibleForTesting + public long getSeekOperations() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_SEEK_OPERATIONS); + } + + @VisibleForTesting + public long getForwardSeekOperations() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_SEEK_FORWARD_OPERATIONS); + } + + @VisibleForTesting + public long getBackwardSeekOperations() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS); + } + + @VisibleForTesting + public long getBytesRead() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_BYTES); + } + + @VisibleForTesting + public long getBytesSkippedOnSeek() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_SEEK_BYTES_SKIPPED); + } + + @VisibleForTesting + public long getBytesBackwardsOnSeek() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS); + } + + @VisibleForTesting + public long getSeekInBuffer() { + return ioStatisticsStore.counters().get(StreamStatisticNames.SEEK_IN_BUFFER); + + } + + @VisibleForTesting + public long getReadOperations() { + return ioStatisticsStore.counters().get(StreamStatisticNames.STREAM_READ_OPERATIONS); + } + + @VisibleForTesting + public long getBytesReadFromBuffer() { + return ioStatisticsStore.counters().get(StreamStatisticNames.BYTES_READ_BUFFER); + } + + @VisibleForTesting + public long getRemoteReadOperations() { + return ioStatisticsStore.counters().get(StreamStatisticNames.REMOTE_READ_OP); + } + + @VisibleForTesting + public long getReadAheadBytesRead() { + return ioStatisticsStore.counters().get(StreamStatisticNames.READ_AHEAD_BYTES_READ); + } + + @VisibleForTesting + public long getRemoteBytesRead() { + return ioStatisticsStore.counters().get(StreamStatisticNames.REMOTE_BYTES_READ); + } + + /** + * Getter for the mean value of the time taken to complete a HTTP GET + * request by AbfsInputStream. + * @return mean value. + */ + @VisibleForTesting + public double getActionHttpGetRequest() { + return ioStatisticsStore.meanStatistics(). + get(ACTION_HTTP_GET_REQUEST + SUFFIX_MEAN).mean(); + } + + /** + * String operator describes all the current statistics. 
+ * Important: there are no guarantees as to the stability + * of this value. + * + * @return the current values of the stream statistics. + */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder( + "StreamStatistics{"); + sb.append(ioStatisticsStore.toString()); + sb.append('}'); + return sb.toString(); + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsIoUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsIoUtils.java index be2dcc54ed8b7..44fa2d8d8bd56 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsIoUtils.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsIoUtils.java @@ -58,6 +58,9 @@ public static void dumpHeadersToDebugLog(final String origin, if (key.contains("Cookie")) { values = "*cookie info*"; } + if (key.equals("sig")) { + values = "XXXX"; + } LOG.debug(" {}={}", key, values); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java new file mode 100644 index 0000000000000..0c664fc2fbbc4 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsListStatusRemoteIterator.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import javax.activation.UnsupportedDataTypeException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.RemoteIterator; + +public class AbfsListStatusRemoteIterator + implements RemoteIterator { + + private static final Logger LOG = LoggerFactory + .getLogger(AbfsListStatusRemoteIterator.class); + + private static final boolean FETCH_ALL_FALSE = false; + private static final int MAX_QUEUE_SIZE = 10; + private static final long POLL_WAIT_TIME_IN_MS = 250; + + private final FileStatus fileStatus; + private final ListingSupport listingSupport; + private final ArrayBlockingQueue iteratorsQueue; + + private volatile boolean isAsyncInProgress = false; + private boolean isIterationComplete = false; + private String continuation; + private Iterator currIterator; + + public AbfsListStatusRemoteIterator(final FileStatus fileStatus, + final ListingSupport listingSupport) { + this.fileStatus = fileStatus; + this.listingSupport = listingSupport; + iteratorsQueue = new ArrayBlockingQueue<>(MAX_QUEUE_SIZE); + currIterator = Collections.emptyIterator(); + fetchBatchesAsync(); + } + + @Override + public boolean hasNext() throws IOException { + if (currIterator.hasNext()) { + return true; + } + currIterator = getNextIterator(); + return currIterator.hasNext(); + } + + @Override + public FileStatus next() throws IOException { + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + return currIterator.next(); + } + + private Iterator getNextIterator() throws IOException { + fetchBatchesAsync(); + try { + Object obj = null; + while (obj == null + && (!isIterationComplete || !iteratorsQueue.isEmpty())) { + obj = iteratorsQueue.poll(POLL_WAIT_TIME_IN_MS, TimeUnit.MILLISECONDS); + } + if (obj == null) { + return Collections.emptyIterator(); + } else if (obj instanceof Iterator) { + return (Iterator) obj; + } else if (obj instanceof IOException) { + throw (IOException) obj; + } else { + throw new UnsupportedDataTypeException(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOG.error("Thread got interrupted: {}", e); + throw new IOException(e); + } + } + + private void fetchBatchesAsync() { + if (isAsyncInProgress || isIterationComplete) { + return; + } + synchronized (this) { + if (isAsyncInProgress || isIterationComplete) { + return; + } + isAsyncInProgress = true; + } + CompletableFuture.runAsync(() -> asyncOp()); + } + + private void asyncOp() { + try { + while (!isIterationComplete && iteratorsQueue.size() <= MAX_QUEUE_SIZE) { + addNextBatchIteratorToQueue(); + } + } catch (IOException ioe) { + LOG.error("Fetching filestatuses failed", ioe); + try { + iteratorsQueue.put(ioe); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + LOG.error("Thread 
got interrupted: {}", interruptedException); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOG.error("Thread got interrupted: {}", e); + } finally { + synchronized (this) { + isAsyncInProgress = false; + } + } + } + + private void addNextBatchIteratorToQueue() + throws IOException, InterruptedException { + List fileStatuses = new ArrayList<>(); + continuation = listingSupport + .listStatus(fileStatus.getPath(), null, fileStatuses, FETCH_ALL_FALSE, + continuation); + if (!fileStatuses.isEmpty()) { + iteratorsQueue.put(fileStatuses.iterator()); + } + synchronized (this) { + if (continuation == null || continuation.isEmpty()) { + isIterationComplete = true; + } + } + } + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java index 7e9746d118ce8..2d02019ab11c6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java @@ -24,37 +24,55 @@ import java.io.OutputStream; import java.net.HttpURLConnection; import java.nio.ByteBuffer; -import java.util.Locale; import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.Callable; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; +import org.apache.hadoop.fs.azurebfs.utils.CachedSASToken; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; import org.apache.hadoop.io.ElasticByteBufferPool; +import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; +import static org.apache.hadoop.fs.impl.StoreImplementationUtils.isProbeForSyncable; import static org.apache.hadoop.io.IOUtils.wrapException; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.FLUSH_CLOSE_MODE; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.FLUSH_MODE; /** * The BlobFsOutputStream for Rest AbfsClient. 
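Callers of the listing iterator above only see the standard RemoteIterator contract; batches are fetched asynchronously behind hasNext()/next(). A minimal consumer sketch (how the iterator instance is obtained is assumed):

  import java.io.IOException;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.RemoteIterator;

  class ListingConsumerSketch {
    static long printEntries(RemoteIterator<FileStatus> entries) throws IOException {
      long count = 0;
      while (entries.hasNext()) {          // may wait briefly while the next batch is fetched
        FileStatus status = entries.next();
        System.out.println(status.getPath());
        count++;
      }
      return count;
    }
  }
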
*/ -public class AbfsOutputStream extends OutputStream implements Syncable, StreamCapabilities { +public class AbfsOutputStream extends OutputStream implements Syncable, + StreamCapabilities, IOStatisticsSource { + private final AbfsClient client; private final String path; private long position; private boolean closed; private boolean supportFlush; private boolean disableOutputStreamFlush; + private boolean enableSmallWriteOptimization; + private boolean isAppendBlob; private volatile IOException lastError; private long lastFlushOffset; @@ -63,43 +81,67 @@ public class AbfsOutputStream extends OutputStream implements Syncable, StreamCa private final int bufferSize; private byte[] buffer; private int bufferIndex; + private int numOfAppendsToServerSinceLastFlush; private final int maxConcurrentRequestCount; + private final int maxRequestsThatCanBeQueued; private ConcurrentLinkedDeque writeOperations; private final ThreadPoolExecutor threadExecutor; private final ExecutorCompletionService completionService; + // SAS tokens can be re-used until they expire + private CachedSASToken cachedSasToken; + /** * Queue storing buffers with the size of the Azure block ready for * reuse. The pool allows reusing the blocks instead of allocating new * blocks. After the data is sent to the service, the buffer is returned * back to the queue */ - private final ElasticByteBufferPool byteBufferPool + private ElasticByteBufferPool byteBufferPool = new ElasticByteBufferPool(); + private final Statistics statistics; + private final AbfsOutputStreamStatistics outputStreamStatistics; + private IOStatistics ioStatistics; + + private static final Logger LOG = + LoggerFactory.getLogger(AbfsOutputStream.class); + public AbfsOutputStream( - final AbfsClient client, - final String path, - final long position, - final int bufferSize, - final boolean supportFlush, - final boolean disableOutputStreamFlush) { + final AbfsClient client, + final Statistics statistics, + final String path, + final long position, + AbfsOutputStreamContext abfsOutputStreamContext) { this.client = client; + this.statistics = statistics; this.path = path; this.position = position; this.closed = false; - this.supportFlush = supportFlush; - this.disableOutputStreamFlush = disableOutputStreamFlush; + this.supportFlush = abfsOutputStreamContext.isEnableFlush(); + this.disableOutputStreamFlush = abfsOutputStreamContext + .isDisableOutputStreamFlush(); + this.enableSmallWriteOptimization + = abfsOutputStreamContext.isEnableSmallWriteOptimization(); + this.isAppendBlob = abfsOutputStreamContext.isAppendBlob(); this.lastError = null; this.lastFlushOffset = 0; - this.bufferSize = bufferSize; + this.bufferSize = abfsOutputStreamContext.getWriteBufferSize(); this.buffer = byteBufferPool.getBuffer(false, bufferSize).array(); this.bufferIndex = 0; + this.numOfAppendsToServerSinceLastFlush = 0; this.writeOperations = new ConcurrentLinkedDeque<>(); + this.outputStreamStatistics = abfsOutputStreamContext.getStreamStatistics(); - this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors(); - + if (this.isAppendBlob) { + this.maxConcurrentRequestCount = 1; + } else { + this.maxConcurrentRequestCount = abfsOutputStreamContext + .getWriteMaxConcurrentRequestCount(); + } + this.maxRequestsThatCanBeQueued = abfsOutputStreamContext + .getMaxWriteRequestsToQueue(); this.threadExecutor = new ThreadPoolExecutor(maxConcurrentRequestCount, maxConcurrentRequestCount, @@ -107,6 +149,11 @@ public AbfsOutputStream( TimeUnit.SECONDS, new 
LinkedBlockingQueue<>()); this.completionService = new ExecutorCompletionService<>(this.threadExecutor); + this.cachedSasToken = new CachedSASToken( + abfsOutputStreamContext.getSasTokenRenewPeriodForStreamsInSeconds()); + if (outputStreamStatistics != null) { + this.ioStatistics = outputStreamStatistics.getIOStatistics(); + } } /** @@ -117,13 +164,7 @@ public AbfsOutputStream( */ @Override public boolean hasCapability(String capability) { - switch (capability.toLowerCase(Locale.ENGLISH)) { - case StreamCapabilities.HSYNC: - case StreamCapabilities.HFLUSH: - return supportFlush; - default: - return false; - } + return supportFlush && isProbeForSyncable(capability); } /** @@ -181,6 +222,16 @@ public synchronized void write(final byte[] data, final int off, final int lengt writableBytes = bufferSize - bufferIndex; } + incrementWriteOps(); + } + + /** + * Increment Write Operations. + */ + private void incrementWriteOps() { + if (statistics != null) { + statistics.incrementWriteOps(1); + } } /** @@ -260,16 +311,41 @@ public synchronized void close() throws IOException { bufferIndex = 0; closed = true; writeOperations.clear(); + byteBufferPool = null; if (!threadExecutor.isShutdown()) { threadExecutor.shutdownNow(); } } + if (LOG.isDebugEnabled()) { + LOG.debug("Closing AbfsOutputStream ", toString()); + } } private synchronized void flushInternal(boolean isClose) throws IOException { maybeThrowLastError(); + + // if its a flush post write < buffersize, send flush parameter in append + if (!isAppendBlob + && enableSmallWriteOptimization + && (numOfAppendsToServerSinceLastFlush == 0) // there are no ongoing store writes + && (writeOperations.size() == 0) // double checking no appends in progress + && (bufferIndex > 0)) { // there is some data that is pending to be written + smallWriteOptimizedflushInternal(isClose); + return; + } + writeCurrentBufferToService(); flushWrittenBytesToService(isClose); + numOfAppendsToServerSinceLastFlush = 0; + } + + private synchronized void smallWriteOptimizedflushInternal(boolean isClose) throws IOException { + // writeCurrentBufferToService will increment numOfAppendsToServerSinceLastFlush + writeCurrentBufferToService(true, isClose); + waitForAppendsToComplete(); + shrinkWriteOperationQueue(); + maybeThrowLastError(); + numOfAppendsToServerSinceLastFlush = 0; } private synchronized void flushInternalAsync() throws IOException { @@ -278,45 +354,126 @@ private synchronized void flushInternalAsync() throws IOException { flushWrittenBytesToServiceAsync(); } - private synchronized void writeCurrentBufferToService() throws IOException { + private void writeAppendBlobCurrentBufferToService() throws IOException { if (bufferIndex == 0) { return; } - final byte[] bytes = buffer; final int bytesLength = bufferIndex; + if (outputStreamStatistics != null) { + outputStreamStatistics.writeCurrentBuffer(); + outputStreamStatistics.bytesToUpload(bytesLength); + } buffer = byteBufferPool.getBuffer(false, bufferSize).array(); bufferIndex = 0; final long offset = position; position += bytesLength; + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, + "writeCurrentBufferToService", "append")) { + AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0, + bytesLength, APPEND_MODE, true); + AbfsRestOperation op = client.append(path, bytes, reqParams, cachedSasToken.get()); + cachedSasToken.update(op.getSasToken()); + if (outputStreamStatistics != null) { + 
outputStreamStatistics.uploadSuccessful(bytesLength); + } + perfInfo.registerResult(op.getResult()); + byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); + perfInfo.registerSuccess(true); + return; + } catch (Exception ex) { + if (ex instanceof AbfsRestOperationException) { + if (((AbfsRestOperationException) ex).getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { + throw new FileNotFoundException(ex.getMessage()); + } + } + if (ex instanceof AzureBlobFileSystemException) { + ex = (AzureBlobFileSystemException) ex; + } + lastError = new IOException(ex); + throw lastError; + } + } - if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) { - waitForTaskToComplete(); + private synchronized void writeCurrentBufferToService() throws IOException { + writeCurrentBufferToService(false, false); + } + + private synchronized void writeCurrentBufferToService(boolean isFlush, boolean isClose) throws IOException { + if (this.isAppendBlob) { + writeAppendBlobCurrentBufferToService(); + return; + } + + if (bufferIndex == 0) { + return; } + numOfAppendsToServerSinceLastFlush++; - final Future job = completionService.submit(new Callable() { - @Override - public Void call() throws Exception { - AbfsPerfTracker tracker = client.getAbfsPerfTracker(); - try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, - "writeCurrentBufferToService", "append")) { - AbfsRestOperation op = client.append(path, offset, bytes, 0, - bytesLength); - perfInfo.registerResult(op.getResult()); - byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); - perfInfo.registerSuccess(true); - return null; + final byte[] bytes = buffer; + final int bytesLength = bufferIndex; + if (outputStreamStatistics != null) { + outputStreamStatistics.writeCurrentBuffer(); + outputStreamStatistics.bytesToUpload(bytesLength); + } + buffer = byteBufferPool.getBuffer(false, bufferSize).array(); + bufferIndex = 0; + final long offset = position; + position += bytesLength; + + if (threadExecutor.getQueue().size() >= maxRequestsThatCanBeQueued) { + //Tracking time spent on waiting for task to complete. 
+ if (outputStreamStatistics != null) { + try (DurationTracker ignored = outputStreamStatistics.timeSpentTaskWait()) { + waitForTaskToComplete(); } + } else { + waitForTaskToComplete(); } - }); - + } + final Future job = + completionService.submit(IOStatisticsBinding + .trackDurationOfCallable((IOStatisticsStore) ioStatistics, + StreamStatisticNames.TIME_SPENT_ON_PUT_REQUEST, + () -> { + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); + try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, + "writeCurrentBufferToService", "append")) { + AppendRequestParameters.Mode + mode = APPEND_MODE; + if (isFlush & isClose) { + mode = FLUSH_CLOSE_MODE; + } else if (isFlush) { + mode = FLUSH_MODE; + } + AppendRequestParameters reqParams = new AppendRequestParameters( + offset, 0, bytesLength, mode, false); + AbfsRestOperation op = client.append(path, bytes, reqParams, + cachedSasToken.get()); + cachedSasToken.update(op.getSasToken()); + perfInfo.registerResult(op.getResult()); + byteBufferPool.putBuffer(ByteBuffer.wrap(bytes)); + perfInfo.registerSuccess(true); + return null; + } + }) + ); + + if (outputStreamStatistics != null) { + if (job.isCancelled()) { + outputStreamStatistics.uploadFailed(bytesLength); + } else { + outputStreamStatistics.uploadSuccessful(bytesLength); + } + } writeOperations.add(new WriteOperation(job, offset, bytesLength)); // Try to shrink the queue shrinkWriteOperationQueue(); } - private synchronized void flushWrittenBytesToService(boolean isClose) throws IOException { + private synchronized void waitForAppendsToComplete() throws IOException { for (WriteOperation writeOperation : writeOperations) { try { writeOperation.task.get(); @@ -334,6 +491,10 @@ private synchronized void flushWrittenBytesToService(boolean isClose) throws IOE throw lastError; } } + } + + private synchronized void flushWrittenBytesToService(boolean isClose) throws IOException { + waitForAppendsToComplete(); flushWrittenBytesToServiceInternal(position, false, isClose); } @@ -348,10 +509,16 @@ private synchronized void flushWrittenBytesToServiceAsync() throws IOException { private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData, final boolean isClose) throws IOException { + // flush is called for appendblob only on close + if (this.isAppendBlob && !isClose) { + return; + } + AbfsPerfTracker tracker = client.getAbfsPerfTracker(); try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker, "flushWrittenBytesToServiceInternal", "flush")) { - AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose); + AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose, cachedSasToken.get()); + cachedSasToken.update(op.getSasToken()); perfInfo.registerResult(op.getResult()).registerSuccess(true); } catch (AzureBlobFileSystemException ex) { if (ex instanceof AbfsRestOperationException) { @@ -374,6 +541,10 @@ private synchronized void shrinkWriteOperationQueue() throws IOException { writeOperations.peek().task.get(); lastTotalAppendOffset += writeOperations.peek().length; writeOperations.remove(); + // Incrementing statistics to indicate queue has been shrunk. 
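As a plain-JDK aside, the submission path above applies back pressure by blocking once the executor queue reaches a bound and draining completed futures from the completion service; a small sketch of that pattern, with made-up pool sizes and a sleep standing in for an append call:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedUploadQueueSketch {
  public static void main(String[] args) throws Exception {
    int maxQueued = 4;
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        2, 2, 10, TimeUnit.SECONDS, new ArrayBlockingQueue<>(maxQueued * 2));
    ExecutorCompletionService<Void> completion =
        new ExecutorCompletionService<>(executor);

    for (int i = 0; i < 20; i++) {
      // Back pressure: once too many tasks are queued, wait for one to finish
      // before submitting the next, as writeCurrentBufferToService() does.
      while (executor.getQueue().size() >= maxQueued) {
        Future<Void> done = completion.take();   // blocks until a task completes
        done.get();                               // surface any failure
      }
      completion.submit(() -> {
        Thread.sleep(10);                         // stand-in for one append call
        return null;
      });
    }
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);
  }
}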
+ if (outputStreamStatistics != null) { + outputStreamStatistics.queueShrunk(); + } } } catch (Exception e) { if (e.getCause() instanceof AzureBlobFileSystemException) { @@ -390,6 +561,10 @@ private void waitForTaskToComplete() throws IOException { for (completed = false; completionService.poll() != null; completed = true) { // keep polling until there is no data } + // for AppendBLob, jobs are not submitted to completion service + if (isAppendBlob) { + completed = true; + } if (!completed) { try { @@ -421,4 +596,61 @@ private static class WriteOperation { public synchronized void waitForPendingUploads() throws IOException { waitForTaskToComplete(); } + + /** + * Getter method for AbfsOutputStream statistics. + * + * @return statistics for AbfsOutputStream. + */ + @VisibleForTesting + public AbfsOutputStreamStatistics getOutputStreamStatistics() { + return outputStreamStatistics; + } + + /** + * Getter to get the size of the task queue. + * + * @return the number of writeOperations in AbfsOutputStream. + */ + @VisibleForTesting + public int getWriteOperationsSize() { + return writeOperations.size(); + } + + @VisibleForTesting + int getMaxConcurrentRequestCount() { + return this.maxConcurrentRequestCount; + } + + @VisibleForTesting + int getMaxRequestsThatCanBeQueued() { + return maxRequestsThatCanBeQueued; + } + + @VisibleForTesting + Boolean isAppendBlobStream() { + return isAppendBlob; + } + + @Override + public IOStatistics getIOStatistics() { + return ioStatistics; + } + + /** + * Appending AbfsOutputStream statistics to base toString(). + * + * @return String with AbfsOutputStream statistics. + */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(super.toString()); + if (outputStreamStatistics != null) { + sb.append("AbfsOutputStream@").append(this.hashCode()); + sb.append("){"); + sb.append(outputStreamStatistics.toString()); + sb.append("}"); + } + return sb.toString(); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java new file mode 100644 index 0000000000000..925cd4f7b5646 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamContext.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +/** + * Class to hold extra output stream configs. 
+ */ +public class AbfsOutputStreamContext extends AbfsStreamContext { + + private int writeBufferSize; + + private boolean enableFlush; + + private boolean enableSmallWriteOptimization; + + private boolean disableOutputStreamFlush; + + private AbfsOutputStreamStatistics streamStatistics; + + private boolean isAppendBlob; + + private int writeMaxConcurrentRequestCount; + + private int maxWriteRequestsToQueue; + + public AbfsOutputStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) { + super(sasTokenRenewPeriodForStreamsInSeconds); + } + + public AbfsOutputStreamContext withWriteBufferSize( + final int writeBufferSize) { + this.writeBufferSize = writeBufferSize; + return this; + } + + public AbfsOutputStreamContext enableFlush(final boolean enableFlush) { + this.enableFlush = enableFlush; + return this; + } + + public AbfsOutputStreamContext enableSmallWriteOptimization(final boolean enableSmallWriteOptimization) { + this.enableSmallWriteOptimization = enableSmallWriteOptimization; + return this; + } + + public AbfsOutputStreamContext disableOutputStreamFlush( + final boolean disableOutputStreamFlush) { + this.disableOutputStreamFlush = disableOutputStreamFlush; + return this; + } + + public AbfsOutputStreamContext withStreamStatistics( + final AbfsOutputStreamStatistics streamStatistics) { + this.streamStatistics = streamStatistics; + return this; + } + + public AbfsOutputStreamContext withAppendBlob( + final boolean isAppendBlob) { + this.isAppendBlob = isAppendBlob; + return this; + } + + public AbfsOutputStreamContext build() { + // Validation of parameters to be done here. + return this; + } + + public AbfsOutputStreamContext withWriteMaxConcurrentRequestCount( + final int writeMaxConcurrentRequestCount) { + this.writeMaxConcurrentRequestCount = writeMaxConcurrentRequestCount; + return this; + } + + public AbfsOutputStreamContext withMaxWriteRequestsToQueue( + final int maxWriteRequestsToQueue) { + this.maxWriteRequestsToQueue = maxWriteRequestsToQueue; + return this; + } + + public int getWriteBufferSize() { + return writeBufferSize; + } + + public boolean isEnableFlush() { + return enableFlush; + } + + public boolean isDisableOutputStreamFlush() { + return disableOutputStreamFlush; + } + + public AbfsOutputStreamStatistics getStreamStatistics() { + return streamStatistics; + } + + public boolean isAppendBlob() { + return isAppendBlob; + } + + public int getWriteMaxConcurrentRequestCount() { + return this.writeMaxConcurrentRequestCount; + } + + public int getMaxWriteRequestsToQueue() { + return this.maxWriteRequestsToQueue; + } + + public boolean isEnableSmallWriteOptimization() { + return this.enableSmallWriteOptimization; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamStatistics.java new file mode 100644 index 0000000000000..c57d5d9bcaadd --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamStatistics.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
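A hedged usage sketch for the builder defined above; the buffer size, SAS renew period and queue limits are placeholder values chosen for illustration, not defaults taken from AbfsConfiguration:

import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamContext;
import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl;

public class OutputStreamContextSketch {
  static AbfsOutputStreamContext sampleContext() {
    return new AbfsOutputStreamContext(120L /* SAS renew period, seconds */)
        .withWriteBufferSize(8 * 1024 * 1024)
        .enableFlush(true)
        .enableSmallWriteOptimization(false)
        .disableOutputStreamFlush(false)
        .withStreamStatistics(new AbfsOutputStreamStatisticsImpl())
        .withAppendBlob(false)
        .withWriteMaxConcurrentRequestCount(4)
        .withMaxWriteRequestsToQueue(8)
        .build();
  }
}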
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.IOStatisticsSource; + +/** + * Interface for {@link AbfsOutputStream} statistics. + */ +@InterfaceStability.Unstable +public interface AbfsOutputStreamStatistics extends IOStatisticsSource { + + /** + * Number of bytes to be uploaded. + * + * @param bytes number of bytes to upload. + */ + void bytesToUpload(long bytes); + + /** + * Records a successful upload and the number of bytes uploaded. + * + * @param bytes number of bytes that were successfully uploaded. + */ + void uploadSuccessful(long bytes); + + /** + * Records that upload is failed and the number of bytes. + * + * @param bytes number of bytes that failed to upload. + */ + void uploadFailed(long bytes); + + /** + * Time spent in waiting for tasks to be completed in the blocking queue. + * @return instance of the DurationTracker that tracks the time for waiting. + */ + DurationTracker timeSpentTaskWait(); + + /** + * Number of times task queue is shrunk. + */ + void queueShrunk(); + + /** + * Number of times buffer is written to the service after a write operation. + */ + void writeCurrentBuffer(); + + /** + * Get the IOStatisticsStore instance from AbfsOutputStreamStatistics. + * @return instance of IOStatisticsStore which extends IOStatistics. + */ + IOStatistics getIOStatistics(); + + /** + * Method to form a string of all AbfsOutputStream statistics and their + * values. + * + * @return AbfsOutputStream statistics. + */ + @Override + String toString(); + +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamStatisticsImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamStatisticsImpl.java new file mode 100644 index 0000000000000..b07cf28a710de --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStreamStatisticsImpl.java @@ -0,0 +1,200 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.apache.hadoop.fs.statistics.DurationTracker; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StoreStatisticNames; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; +import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; + +import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore; + +/** + * OutputStream statistics implementation for Abfs. + */ +public class AbfsOutputStreamStatisticsImpl + implements AbfsOutputStreamStatistics { + + private final IOStatisticsStore ioStatisticsStore = iostatisticsStore() + .withCounters( + StreamStatisticNames.BYTES_TO_UPLOAD, + StreamStatisticNames.BYTES_UPLOAD_SUCCESSFUL, + StreamStatisticNames.BYTES_UPLOAD_FAILED, + StreamStatisticNames.QUEUE_SHRUNK_OPS, + StreamStatisticNames.WRITE_CURRENT_BUFFER_OPERATIONS + ) + .withDurationTracking( + StreamStatisticNames.TIME_SPENT_ON_PUT_REQUEST, + StreamStatisticNames.TIME_SPENT_ON_TASK_WAIT + ) + .build(); + + /* Reference to the atomic counter for frequently updated counters to avoid + * cost of the map lookup on every increment. + */ + private final AtomicLong bytesUpload = + ioStatisticsStore.getCounterReference(StreamStatisticNames.BYTES_TO_UPLOAD); + private final AtomicLong bytesUploadedSuccessfully = + ioStatisticsStore.getCounterReference(StreamStatisticNames.BYTES_UPLOAD_SUCCESSFUL); + private final AtomicLong writeCurrentBufferOps = + ioStatisticsStore.getCounterReference(StreamStatisticNames.WRITE_CURRENT_BUFFER_OPERATIONS); + + /** + * Records the need to upload bytes and increments the total bytes that + * needs to be uploaded. + * + * @param bytes total bytes to upload. Negative bytes are ignored. + */ + @Override + public void bytesToUpload(long bytes) { + bytesUpload.addAndGet(bytes); + } + + /** + * Records the total bytes successfully uploaded through AbfsOutputStream. + * + * @param bytes number of bytes that were successfully uploaded. Negative + * bytes are ignored. + */ + @Override + public void uploadSuccessful(long bytes) { + bytesUploadedSuccessfully.addAndGet(bytes); + } + + /** + * Records the total bytes failed to upload through AbfsOutputStream. + * + * @param bytes number of bytes failed to upload. Negative bytes are ignored. + */ + @Override + public void uploadFailed(long bytes) { + ioStatisticsStore.incrementCounter(StreamStatisticNames.BYTES_UPLOAD_FAILED, bytes); + } + + /** + * {@inheritDoc} + * + * Records the total time spent waiting for a task to complete. + * + * When the thread executor has a task queue + * {@link java.util.concurrent.BlockingQueue} of size greater than or + * equal to 2 times the maxConcurrentRequestCounts then, it waits for a + * task in that queue to finish, then do the next task in the queue. + * + * This time spent while waiting for the task to be completed is being + * recorded in this counter. + * + */ + @Override + public DurationTracker timeSpentTaskWait() { + return ioStatisticsStore.trackDuration(StreamStatisticNames.TIME_SPENT_ON_TASK_WAIT); + } + + /** + * {@inheritDoc} + * + * Records the number of times AbfsOutputStream try to remove the completed + * write operations from the beginning of write operation task queue. 
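For callers of the statistics above, a sketch of pulling individual counters out of an IOStatistics snapshot such as the one returned by AbfsOutputStream#getIOStatistics(); it assumes the ioStatisticsToString helper from IOStatisticsLogging for the one-line dump:

import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.IOStatisticsLogging;
import org.apache.hadoop.fs.statistics.StreamStatisticNames;

public class StreamStatsReaderSketch {
  // 'stats' would come from the stream's getIOStatistics() after some writes.
  static void report(IOStatistics stats) {
    Long uploaded = stats.counters()
        .get(StreamStatisticNames.BYTES_UPLOAD_SUCCESSFUL);
    Long failed = stats.counters()
        .get(StreamStatisticNames.BYTES_UPLOAD_FAILED);
    System.out.println("uploaded=" + uploaded + ", failed=" + failed);
    // One-line dump of every counter, gauge and duration in the snapshot.
    System.out.println(IOStatisticsLogging.ioStatisticsToString(stats));
  }
}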
+ */ + @Override + public void queueShrunk() { + ioStatisticsStore.incrementCounter(StreamStatisticNames.QUEUE_SHRUNK_OPS); + } + + /** + * {@inheritDoc} + * + * Records the number of times AbfsOutputStream writes the buffer to the + * service via the AbfsClient and appends the buffer to the service. + */ + @Override + public void writeCurrentBuffer() { + writeCurrentBufferOps.incrementAndGet(); + } + + /** + * {@inheritDoc} + * + * A getter for IOStatisticsStore instance which extends IOStatistics. + * + * @return IOStatisticsStore instance. + */ + @Override + public IOStatistics getIOStatistics() { + return ioStatisticsStore; + } + + @VisibleForTesting + public long getBytesToUpload() { + return ioStatisticsStore.counters().get(StreamStatisticNames.BYTES_TO_UPLOAD); + } + + @VisibleForTesting + public long getBytesUploadSuccessful() { + return ioStatisticsStore.counters().get(StreamStatisticNames.BYTES_UPLOAD_SUCCESSFUL); + } + + @VisibleForTesting + public long getBytesUploadFailed() { + return ioStatisticsStore.counters().get(StreamStatisticNames.BYTES_UPLOAD_FAILED); + } + + @VisibleForTesting + public long getTimeSpentOnTaskWait() { + return ioStatisticsStore.counters().get(StreamStatisticNames.TIME_SPENT_ON_TASK_WAIT); + } + + @VisibleForTesting + public long getQueueShrunkOps() { + return ioStatisticsStore.counters().get(StreamStatisticNames.QUEUE_SHRUNK_OPS); + } + + @VisibleForTesting + public long getWriteCurrentBufferOperations() { + return ioStatisticsStore.counters().get(StreamStatisticNames.WRITE_CURRENT_BUFFER_OPERATIONS); + } + + /** + * Getter for mean value of time taken to complete a PUT request by + * AbfsOutputStream. + * @return mean value. + */ + @VisibleForTesting + public double getTimeSpentOnPutRequest() { + return ioStatisticsStore.meanStatistics().get(StreamStatisticNames.TIME_SPENT_ON_PUT_REQUEST + StoreStatisticNames.SUFFIX_MEAN).mean(); + } + + /** + * String to show AbfsOutputStream statistics values in AbfsOutputStream. + * + * @return String with AbfsOutputStream statistics. + */ + @Override public String toString() { + final StringBuilder outputStreamStats = new StringBuilder( + "OutputStream Statistics{"); + outputStreamStats.append(ioStatisticsStore.toString()); + outputStreamStats.append("}"); + return outputStreamStats.toString(); + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java index 445c3665437c7..24ec2926647e6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java @@ -24,9 +24,11 @@ import java.net.UnknownHostException; import java.util.List; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.AbfsStatistic; import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; @@ -53,6 +55,9 @@ public class AbfsRestOperation { // request body and all the download methods have a response body. private final boolean hasRequestBody; + // Used only by AbfsInputStream/AbfsOutputStream to reuse SAS tokens. 
+ private final String sasToken; + private static final Logger LOG = LoggerFactory.getLogger(AbfsClient.class); // For uploads, this is the request entity body. For downloads, @@ -60,13 +65,36 @@ public class AbfsRestOperation { private byte[] buffer; private int bufferOffset; private int bufferLength; + private int retryCount = 0; private AbfsHttpOperation result; + private AbfsCounters abfsCounters; public AbfsHttpOperation getResult() { return result; } + public void hardSetResult(int httpStatus) { + result = AbfsHttpOperation.getAbfsHttpOperationWithFixedResult(this.url, + this.method, httpStatus); + } + + public URL getUrl() { + return url; + } + + public List getRequestHeaders() { + return requestHeaders; + } + + public boolean isARetriedRequest() { + return (retryCount > 0); + } + + String getSasToken() { + return sasToken; + } + /** * Initializes a new REST operation. * @@ -80,6 +108,24 @@ public AbfsHttpOperation getResult() { final String method, final URL url, final List requestHeaders) { + this(operationType, client, method, url, requestHeaders, null); + } + + /** + * Initializes a new REST operation. + * + * @param client The Blob FS client. + * @param method The HTTP method (PUT, PATCH, POST, GET, HEAD, or DELETE). + * @param url The full URL including query string parameters. + * @param requestHeaders The HTTP request headers. + * @param sasToken A sasToken for optional re-use by AbfsInputStream/AbfsOutputStream. + */ + AbfsRestOperation(final AbfsRestOperationType operationType, + final AbfsClient client, + final String method, + final URL url, + final List requestHeaders, + final String sasToken) { this.operationType = operationType; this.client = client; this.method = method; @@ -87,6 +133,8 @@ public AbfsHttpOperation getResult() { this.requestHeaders = requestHeaders; this.hasRequestBody = (AbfsHttpConstants.HTTP_METHOD_PUT.equals(method) || AbfsHttpConstants.HTTP_METHOD_PATCH.equals(method)); + this.sasToken = sasToken; + this.abfsCounters = client.getAbfsCounters(); } /** @@ -101,6 +149,7 @@ public AbfsHttpOperation getResult() { * this will hold the response entity body. * @param bufferOffset An offset into the buffer where the data beings. * @param bufferLength The length of the data in the buffer. + * @param sasToken A sasToken for optional re-use by AbfsInputStream/AbfsOutputStream. */ AbfsRestOperation(AbfsRestOperationType operationType, AbfsClient client, @@ -109,18 +158,21 @@ public AbfsHttpOperation getResult() { List requestHeaders, byte[] buffer, int bufferOffset, - int bufferLength) { - this(operationType, client, method, url, requestHeaders); + int bufferLength, + String sasToken) { + this(operationType, client, method, url, requestHeaders, sasToken); this.buffer = buffer; this.bufferOffset = bufferOffset; this.bufferLength = bufferLength; + this.abfsCounters = client.getAbfsCounters(); } /** * Executes the REST operation with retry, by issuing one or more * HTTP operations. 
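The retry behaviour described here boils down to attempt, consult the policy, sleep, repeat. A simplified standalone sketch with hypothetical names (Attempt, backoff) rather than the actual AbfsRestOperation and ExponentialRetryPolicy wiring; the backoff formula is a generic jittered exponential, not the exact ABFS one:

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

public class RetryLoopSketch {
  /** Hypothetical single attempt: returns true on success, false to retry. */
  interface Attempt {
    boolean run(int retryCount) throws IOException;
  }

  static void executeWithRetry(Attempt attempt, int maxRetries) throws IOException {
    int retryCount = 0;
    while (!attempt.run(retryCount)) {
      retryCount++;
      if (retryCount > maxRetries) {
        throw new IOException("Giving up after " + retryCount + " attempts");
      }
      try {
        Thread.sleep(backoff(retryCount));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while retrying", e);
      }
    }
  }

  /** Jittered exponential backoff in milliseconds. */
  static long backoff(int retryCount) {
    long base = 500L * (1L << Math.min(retryCount, 6));
    return base / 2 + ThreadLocalRandom.current().nextLong(base / 2 + 1);
  }
}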
*/ - void execute() throws AzureBlobFileSystemException { + @VisibleForTesting + public void execute() throws AzureBlobFileSystemException { // see if we have latency reports from the previous requests String latencyHeader = this.client.getAbfsPerfTracker().getClientLatency(); if (latencyHeader != null && !latencyHeader.isEmpty()) { @@ -129,10 +181,11 @@ void execute() throws AzureBlobFileSystemException { requestHeaders.add(httpHeader); } - int retryCount = 0; + retryCount = 0; LOG.debug("First execution of REST operation - {}", operationType); - while (!executeHttpOperation(retryCount++)) { + while (!executeHttpOperation(retryCount)) { try { + ++retryCount; LOG.debug("Retrying REST operation {}. RetryCount = {}", operationType, retryCount); Thread.sleep(client.getRetryPolicy().getRetryInterval(retryCount)); @@ -159,6 +212,7 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS try { // initialize the HTTP request and open the connection httpOperation = new AbfsHttpOperation(url, method, requestHeaders); + incrementCounter(AbfsStatistic.CONNECTIONS_MADE, 1); switch(client.getAuthType()) { case Custom: @@ -183,19 +237,35 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS // dump the headers AbfsIoUtils.dumpHeadersToDebugLog("Request Headers", httpOperation.getConnection().getRequestProperties()); - AbfsClientThrottlingIntercept.sendingRequest(operationType); + AbfsClientThrottlingIntercept.sendingRequest(operationType, abfsCounters); if (hasRequestBody) { // HttpUrlConnection requires httpOperation.sendRequest(buffer, bufferOffset, bufferLength); + incrementCounter(AbfsStatistic.SEND_REQUESTS, 1); + incrementCounter(AbfsStatistic.BYTES_SENT, bufferLength); } httpOperation.processResponse(buffer, bufferOffset, bufferLength); - } catch (IOException ex) { - if (ex instanceof UnknownHostException) { - LOG.warn(String.format("Unknown host name: %s. Retrying to resolve the host name...", httpOperation.getUrl().getHost())); + incrementCounter(AbfsStatistic.GET_RESPONSES, 1); + //Only increment bytesReceived counter when the status code is 2XX. + if (httpOperation.getStatusCode() >= HttpURLConnection.HTTP_OK + && httpOperation.getStatusCode() <= HttpURLConnection.HTTP_PARTIAL) { + incrementCounter(AbfsStatistic.BYTES_RECEIVED, + httpOperation.getBytesReceived()); } - + } catch (UnknownHostException ex) { + String hostname = null; + if (httpOperation != null) { + hostname = httpOperation.getHost(); + } + LOG.warn("Unknown host name: %s. Retrying to resolve the host name...", + hostname); + if (!client.getRetryPolicy().shouldRetry(retryCount, -1)) { + throw new InvalidAbfsRestOperationException(ex); + } + return false; + } catch (IOException ex) { if (LOG.isDebugEnabled()) { if (httpOperation != null) { LOG.debug("HttpRequestFailure: " + httpOperation.toString(), ex); @@ -220,7 +290,7 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS AbfsClientThrottlingIntercept.updateMetrics(operationType, httpOperation); } - LOG.debug("HttpRequest: {}", httpOperation.toString()); + LOG.debug("HttpRequest: {}: {}", operationType, httpOperation.toString()); if (client.getRetryPolicy().shouldRetry(retryCount, httpOperation.getStatusCode())) { return false; @@ -230,4 +300,16 @@ private boolean executeHttpOperation(final int retryCount) throws AzureBlobFileS return true; } + + /** + * Incrementing Abfs counters with a long value. + * + * @param statistic the Abfs statistic that needs to be incremented. 
+ * @param value the value to be incremented by. + */ + private void incrementCounter(AbfsStatistic statistic, long value) { + if (abfsCounters != null) { + abfsCounters.incrementCounter(statistic, value); + } + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsStreamContext.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsStreamContext.java new file mode 100644 index 0000000000000..9cd858cde818c --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsStreamContext.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +/** + * Base stream configuration class which is going + * to store common configs among input and output streams. + */ +public abstract class AbfsStreamContext { + private long sasTokenRenewPeriodForStreamsInSeconds; + + // hide default constructor + private AbfsStreamContext() { + } + + public AbfsStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) { + this.sasTokenRenewPeriodForStreamsInSeconds = sasTokenRenewPeriodForStreamsInSeconds; + } + + public long getSasTokenRenewPeriodForStreamsInSeconds() { + return sasTokenRenewPeriodForStreamsInSeconds; + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java index b272cf27ca0d7..9a75c78aa0612 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java @@ -21,6 +21,8 @@ import java.util.Random; import java.net.HttpURLConnection; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + /** * Retry policy used by AbfsClient. 
* */ @@ -138,4 +140,25 @@ public long getRetryInterval(final int retryCount) { return retryInterval; } + + @VisibleForTesting + int getRetryCount() { + return this.retryCount; + } + + @VisibleForTesting + int getMinBackoff() { + return this.minBackoff; + } + + @VisibleForTesting + int getMaxBackoff() { + return maxBackoff; + } + + @VisibleForTesting + int getDeltaBackoff() { + return this.deltaBackoff; + } + } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java new file mode 100644 index 0000000000000..4c449409aafde --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListingSupport.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; + +@InterfaceAudience.Private +@InterfaceStability.Unstable +public interface ListingSupport { + + /** + * @param path The list path. + * @return the entries in the path. + * @throws IOException in case of error + */ + FileStatus[] listStatus(Path path) throws IOException; + + /** + * @param path The list path. + * @param startFrom The entry name that list results should start with. + * For example, if folder "/folder" contains four + * files: "afile", "bfile", "hfile", "ifile". Then + * listStatus(Path("/folder"), "hfile") will return + * "/folder/hfile" and "/folder/ifile". Notice that if + * startFrom is a non-existent entry name, then the + * list response contains all entries after this + * non-existent entry in lexical order: listStatus + * (Path("/folder"), "cfile") will return + * "/folder/hfile" and "/folder/ifile". + * @return the entries in the path starting from "startFrom" in lexical order. + * @throws IOException in case of error + */ + FileStatus[] listStatus(Path path, String startFrom) throws IOException; + + /** + * @param path The list path. + * @param startFrom The entry name that list results should start with. + * For example, if folder "/folder" contains four + * files: "afile", "bfile", "hfile", "ifile". Then + * listStatus(Path("/folder"), "hfile") will return + * "/folder/hfile" and "/folder/ifile". Notice that if + * startFrom is a non-existent entry name, then the + * list response contains all entries after this + * non-existent entry in lexical order: listStatus + * (Path("/folder"), "cfile") will return + * "/folder/hfile" and "/folder/ifile". + * @param fileStatuses This list has to be filled with the FileStatus objects + * @param fetchAll flag to indicate if the above list needs to be + * filled with just one page of results or the entire + * result. + * @param continuation Continuation token. null means start from the beginning.
+ * @return Continuation tokem + * @throws IOException in case of error + */ + String listStatus(Path path, String startFrom, List fileStatuses, + boolean fetchAll, String continuation) throws IOException; +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java index 00e4f008ad0a8..5d55726222de7 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java @@ -18,10 +18,13 @@ package org.apache.hadoop.fs.azurebfs.services; +import java.io.IOException; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; +import static org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus.READ_FAILED; + class ReadBuffer { private AbfsInputStream stream; @@ -40,6 +43,8 @@ class ReadBuffer { private boolean isLastByteConsumed = false; private boolean isAnyByteConsumed = false; + private IOException errException = null; + public AbfsInputStream getStream() { return stream; } @@ -88,12 +93,23 @@ public void setBufferindex(int bufferindex) { this.bufferindex = bufferindex; } + public IOException getErrException() { + return errException; + } + + public void setErrException(final IOException errException) { + this.errException = errException; + } + public ReadBufferStatus getStatus() { return status; } public void setStatus(ReadBufferStatus status) { this.status = status; + if (status == READ_FAILED) { + bufferindex = -1; + } } public CountDownLatch getLatch() { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java index 5b71cf05225a8..f330d790eb843 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java @@ -21,23 +21,31 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.LinkedList; import java.util.Queue; import java.util.Stack; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The Read Buffer Manager for Rest AbfsClient. 
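Looking back at the ListingSupport contract defined above, one way a caller might drain a listing page by page; it assumes a null continuation token marks the end of the results, which the interface javadoc does not spell out:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azurebfs.services.ListingSupport;

public class ListingSketch {
  /** Collect a listing page by page, resuming with the returned token. */
  static List<FileStatus> listFrom(ListingSupport listing, Path dir, String startFrom)
      throws IOException {
    List<FileStatus> results = new ArrayList<>();
    String continuation = null;           // null means start from the beginning
    do {
      continuation = listing.listStatus(dir, startFrom,
          results, /* fetchAll */ false, continuation);
    } while (continuation != null);       // assumption: null signals completion
    return results;
  }
}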
*/ final class ReadBufferManager { private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class); + private static final int ONE_KB = 1024; + private static final int ONE_MB = ONE_KB * ONE_KB; private static final int NUM_BUFFERS = 16; - private static final int BLOCK_SIZE = 4 * 1024 * 1024; private static final int NUM_THREADS = 8; - private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold + private static final int DEFAULT_THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold + private static int blockSize = 4 * ONE_MB; + private static int thresholdAgeMilliseconds = DEFAULT_THRESHOLD_AGE_MILLISECONDS; private Thread[] threads = new Thread[NUM_THREADS]; private byte[][] buffers; // array of byte[] buffers, to hold the data that is read private Stack freeList = new Stack<>(); // indices in buffers[] array that are available @@ -45,21 +53,37 @@ final class ReadBufferManager { private Queue readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet private LinkedList inProgressList = new LinkedList<>(); // requests being processed by worker threads private LinkedList completedReadList = new LinkedList<>(); // buffers available for reading - private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block + private static ReadBufferManager bufferManager; // singleton, initialized in static initialization block + private static final ReentrantLock LOCK = new ReentrantLock(); - static { - BUFFER_MANAGER = new ReadBufferManager(); - BUFFER_MANAGER.init(); + static ReadBufferManager getBufferManager() { + if (bufferManager == null) { + LOCK.lock(); + try { + if (bufferManager == null) { + bufferManager = new ReadBufferManager(); + bufferManager.init(); + } + } finally { + LOCK.unlock(); + } + } + return bufferManager; } - static ReadBufferManager getBufferManager() { - return BUFFER_MANAGER; + static void setReadBufferManagerConfigs(int readAheadBlockSize) { + if (bufferManager == null) { + LOGGER.debug( + "ReadBufferManager not initialized yet. Overriding readAheadBlockSize as {}", + readAheadBlockSize); + blockSize = readAheadBlockSize; + } } private void init() { buffers = new byte[NUM_BUFFERS][]; for (int i = 0; i < NUM_BUFFERS; i++) { - buffers[i] = new byte[BLOCK_SIZE]; // same buffers are reused. The byte array never goes back to GC + buffers[i] = new byte[blockSize]; // same buffers are reused. The byte array never goes back to GC freeList.add(i); } for (int i = 0; i < NUM_THREADS; i++) { @@ -119,10 +143,10 @@ void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, fi buffer.setBufferindex(bufferIndex); readAheadQueue.add(buffer); notifyAll(); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}", - stream.getPath(), requestedOffset, buffer.getBufferindex()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}", + stream.getPath(), requestedOffset, buffer.getBufferindex()); + } } } @@ -141,7 +165,8 @@ void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, fi * @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0. 
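The lazily created singleton above only honours configuration overrides that arrive before the first getBufferManager() call. A generic sketch of that idiom follows; it adds a volatile field for safe publication, which is one conservative variant rather than a copy of the code above:

// Order matters: the block-size override is only honoured before the
// singleton is created, mirroring setReadBufferManagerConfigs() above.
final class LazyConfiguredSingletonSketch {
  private static final Object LOCK = new Object();
  private static volatile LazyConfiguredSingletonSketch instance;
  private static int blockSize = 4 * 1024 * 1024;   // default until first use

  private final int effectiveBlockSize;

  private LazyConfiguredSingletonSketch(int effectiveBlockSize) {
    this.effectiveBlockSize = effectiveBlockSize;
  }

  static void overrideBlockSize(int newSize) {
    if (instance == null) {      // ignored once the singleton exists
      blockSize = newSize;
    }
  }

  static LazyConfiguredSingletonSketch get() {
    if (instance == null) {
      synchronized (LOCK) {
        if (instance == null) {
          instance = new LazyConfiguredSingletonSketch(blockSize);
        }
      }
    }
    return instance;
  }

  int getEffectiveBlockSize() {
    return effectiveBlockSize;
  }
}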
* @return the number of bytes read */ - int getBlock(final AbfsInputStream stream, final long position, final int length, final byte[] buffer) { + int getBlock(final AbfsInputStream stream, final long position, final int length, final byte[] buffer) + throws IOException { // not synchronized, so have to be careful with locking if (LOGGER.isTraceEnabled()) { LOGGER.trace("getBlock for file {} position {} thread {}", @@ -213,6 +238,8 @@ private synchronized boolean tryEvict() { return false; // there are no evict-able buffers } + long currentTimeInMs = currentTimeMillis(); + // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed) for (ReadBuffer buf : completedReadList) { if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) { @@ -237,23 +264,45 @@ private synchronized boolean tryEvict() { } // next, try any old nodes that have not been consumed + // Failed read buffers (with buffer index=-1) that are older than + // thresholdAge should be cleaned up, but at the same time should not + // report successful eviction. + // Queue logic expects that a buffer is freed up for read ahead when + // eviction is successful, whereas a failed ReadBuffer would have released + // its buffer when its status was set to READ_FAILED. long earliestBirthday = Long.MAX_VALUE; + ArrayList oldFailedBuffers = new ArrayList<>(); for (ReadBuffer buf : completedReadList) { - if (buf.getTimeStamp() < earliestBirthday) { + if ((buf.getBufferindex() != -1) + && (buf.getTimeStamp() < earliestBirthday)) { nodeToEvict = buf; earliestBirthday = buf.getTimeStamp(); + } else if ((buf.getBufferindex() == -1) + && (currentTimeInMs - buf.getTimeStamp()) > thresholdAgeMilliseconds) { + oldFailedBuffers.add(buf); } } - if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) { + + for (ReadBuffer buf : oldFailedBuffers) { + evict(buf); + } + + if ((currentTimeInMs - earliestBirthday > thresholdAgeMilliseconds) && (nodeToEvict != null)) { return evict(nodeToEvict); } + LOGGER.trace("No buffer eligible for eviction"); // nothing can be evicted return false; } private boolean evict(final ReadBuffer buf) { - freeList.push(buf.getBufferindex()); + // As failed ReadBuffers (bufferIndx = -1) are saved in completedReadList, + // avoid adding it to freeList. + if (buf.getBufferindex() != -1) { + freeList.push(buf.getBufferindex()); + } + completedReadList.remove(buf); if (LOGGER.isTraceEnabled()) { LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}", @@ -289,6 +338,27 @@ private ReadBuffer getFromList(final Collection list, final AbfsInpu return null; } + /** + * Returns buffers that failed or passed from completed queue. 
+ * @param stream + * @param requestedOffset + * @return + */ + private ReadBuffer getBufferFromCompletedQueue(final AbfsInputStream stream, final long requestedOffset) { + for (ReadBuffer buffer : completedReadList) { + // Buffer is returned if the requestedOffset is at or above buffer's + // offset but less than buffer's length or the actual requestedLength + if ((buffer.getStream() == stream) + && (requestedOffset >= buffer.getOffset()) + && ((requestedOffset < buffer.getOffset() + buffer.getLength()) + || (requestedOffset < buffer.getOffset() + buffer.getRequestedLength()))) { + return buffer; + } + } + + return null; + } + private void clearFromReadAheadQueue(final AbfsInputStream stream, final long requestedOffset) { ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset); if (buffer != null) { @@ -299,11 +369,28 @@ private void clearFromReadAheadQueue(final AbfsInputStream stream, final long re } private int getBlockFromCompletedQueue(final AbfsInputStream stream, final long position, final int length, - final byte[] buffer) { - ReadBuffer buf = getFromList(completedReadList, stream, position); - if (buf == null || position >= buf.getOffset() + buf.getLength()) { + final byte[] buffer) throws IOException { + ReadBuffer buf = getBufferFromCompletedQueue(stream, position); + + if (buf == null) { return 0; } + + if (buf.getStatus() == ReadBufferStatus.READ_FAILED) { + // To prevent new read requests to fail due to old read-ahead attempts, + // return exception only from buffers that failed within last thresholdAgeMilliseconds + if ((currentTimeMillis() - (buf.getTimeStamp()) < thresholdAgeMilliseconds)) { + throw buf.getErrException(); + } else { + return 0; + } + } + + if ((buf.getStatus() != ReadBufferStatus.AVAILABLE) + || (position >= buf.getOffset() + buf.getLength())) { + return 0; + } + int cursor = (int) (position - buf.getOffset()); int availableLengthInBuffer = buf.getLength() - cursor; int lengthToCopy = Math.min(length, availableLengthInBuffer); @@ -368,14 +455,17 @@ void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final i inProgressList.remove(buffer); if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) { buffer.setStatus(ReadBufferStatus.AVAILABLE); - buffer.setTimeStamp(currentTimeMillis()); buffer.setLength(bytesActuallyRead); - completedReadList.add(buffer); } else { freeList.push(buffer.getBufferindex()); - // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC + // buffer will be deleted as per the eviction policy. 
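The completed-list handling above amounts to a negative cache with a time-to-live: a failure is replayed only while it is still fresh, otherwise it is ignored and the read proceeds. A compact standalone sketch of that idea:

import java.io.IOException;

/** Negative cache with a TTL: replay a recent failure, ignore a stale one. */
final class FailureCacheSketch {
  private IOException lastFailure;
  private long failureTimeMs;
  private final long ttlMs;

  FailureCacheSketch(long ttlMs) {
    this.ttlMs = ttlMs;
  }

  synchronized void recordFailure(IOException e) {
    lastFailure = e;
    failureTimeMs = now();
  }

  /** Rethrow only failures younger than the TTL, as the completed-list check above does. */
  synchronized void maybeRethrow() throws IOException {
    if (lastFailure != null && now() - failureTimeMs < ttlMs) {
      throw lastFailure;
    }
  }

  private static long now() {
    // Monotonic clock, matching ReadBufferManager#currentTimeMillis().
    return System.nanoTime() / 1_000_000;
  }
}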
} + + buffer.setStatus(result); + buffer.setTimeStamp(currentTimeMillis()); + completedReadList.add(buffer); } + //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results buffer.getLatch().countDown(); // wake up waiting threads (if any) } @@ -392,4 +482,97 @@ void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final i private long currentTimeMillis() { return System.nanoTime() / 1000 / 1000; } + + @VisibleForTesting + int getThresholdAgeMilliseconds() { + return thresholdAgeMilliseconds; + } + + @VisibleForTesting + static void setThresholdAgeMilliseconds(int thresholdAgeMs) { + thresholdAgeMilliseconds = thresholdAgeMs; + } + + @VisibleForTesting + int getCompletedReadListSize() { + return completedReadList.size(); + } + + @VisibleForTesting + void callTryEvict() { + tryEvict(); + } + + /** + * Test method that can clean up the current state of readAhead buffers and + * the lists. Will also trigger a fresh init. + */ + @VisibleForTesting + void testResetReadBufferManager() { + synchronized (this) { + ArrayList completedBuffers = new ArrayList<>(); + for (ReadBuffer buf : completedReadList) { + if (buf != null) { + completedBuffers.add(buf); + } + } + + for (ReadBuffer buf : completedBuffers) { + evict(buf); + } + + readAheadQueue.clear(); + inProgressList.clear(); + completedReadList.clear(); + freeList.clear(); + for (int i = 0; i < NUM_BUFFERS; i++) { + buffers[i] = null; + } + buffers = null; + resetBufferManager(); + } + } + + /** + * Reset buffer manager to null. + */ + @VisibleForTesting + static void resetBufferManager() { + bufferManager = null; + } + + /** + * Reset readAhead buffer to needed readAhead block size and + * thresholdAgeMilliseconds. + * @param readAheadBlockSize + * @param thresholdAgeMilliseconds + */ + @VisibleForTesting + void testResetReadBufferManager(int readAheadBlockSize, int thresholdAgeMilliseconds) { + setBlockSize(readAheadBlockSize); + setThresholdAgeMilliseconds(thresholdAgeMilliseconds); + testResetReadBufferManager(); + } + + @VisibleForTesting + static void setBlockSize(int readAheadBlockSize) { + blockSize = readAheadBlockSize; + } + + @VisibleForTesting + int getReadAheadBlockSize() { + return blockSize; + } + + /** + * Test method that can mimic no free buffers scenario and also add a ReadBuffer + * into completedReadList. This readBuffer will get picked up by TryEvict() + * next time a new queue request comes in. + * @param buf that needs to be added to completedReadlist + */ + @VisibleForTesting + void testMimicFullUseAndAddFailedBuffer(ReadBuffer buf) { + freeList.clear(); + completedReadList.add(buf); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java index af69de0f089e9..41acd7e06f132 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.azurebfs.services; +import java.io.IOException; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; @@ -61,9 +62,18 @@ public void run() { if (buffer != null) { try { // do the actual read, from the file. 
- int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength()); + int bytesRead = buffer.getStream().readRemote( + buffer.getOffset(), + buffer.getBuffer(), + 0, + // If AbfsInputStream was created with bigger buffer size than + // read-ahead buffer size, make sure a valid length is passed + // for remote read + Math.min(buffer.getRequestedLength(), buffer.getBuffer().length)); + bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager } catch (Exception ex) { + buffer.setErrException(new IOException(ex)); bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0); } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SimpleKeyProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SimpleKeyProvider.java index 727e1b3fd3fdd..bb1ec9e4a3fb5 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SimpleKeyProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SimpleKeyProvider.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException; +import org.apache.hadoop.fs.azurebfs.diagnostics.Base64StringConfigurationBasicValidator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +44,10 @@ public String getStorageAccountKey(String accountName, Configuration rawConfig) try { AbfsConfiguration abfsConfig = new AbfsConfiguration(rawConfig, accountName); key = abfsConfig.getPasswordString(ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME); - } catch(IllegalAccessException | InvalidConfigurationValueException e) { + + // Validating the key. + validateStorageAccountKey(key); + } catch (IllegalAccessException | InvalidConfigurationValueException e) { throw new KeyProviderException("Failure to initialize configuration", e); } catch(IOException ioe) { LOG.warn("Unable to get key from credential providers. {}", ioe); @@ -51,4 +55,18 @@ public String getStorageAccountKey(String accountName, Configuration rawConfig) return key; } + + /** + * A method to validate the storage key. + * + * @param key the key to be validated. + * @throws InvalidConfigurationValueException + */ + private void validateStorageAccountKey(String key) + throws InvalidConfigurationValueException { + Base64StringConfigurationBasicValidator validator = new Base64StringConfigurationBasicValidator( + ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME, "", true); + + validator.validate(key); + } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/CachedSASToken.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/CachedSASToken.java new file mode 100644 index 0000000000000..17a9125b06952 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/CachedSASToken.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS; +import static java.time.temporal.ChronoUnit.SECONDS; + +/** + * CachedSASToken provides simple utility for managing renewal + * of SAS tokens used by Input/OutputStream. This enables SAS re-use + * and reduces calls to the SASTokenProvider. + */ +public final class CachedSASToken { + public static final Logger LOG = LoggerFactory.getLogger(CachedSASToken.class); + private final long minExpirationInSeconds; + private String sasToken; + private OffsetDateTime sasExpiry; + + /** + * Create instance with default minimum expiration. SAS tokens are + * automatically renewed when their expiration is within this period. + */ + public CachedSASToken() { + this(DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS); + } + + /** + * Create instance with specified minimum expiration. SAS tokens are + * automatically renewed when their expiration is within this period. + * @param minExpirationInSeconds + */ + public CachedSASToken(long minExpirationInSeconds) { + this.minExpirationInSeconds = minExpirationInSeconds; + } + + /** + * Checks if the SAS token is expired or near expiration. + * @param expiry + * @param minExpiryInSeconds + * @return true if the SAS is near sasExpiry; otherwise false + */ + private static boolean isNearExpiry(OffsetDateTime expiry, long minExpiryInSeconds) { + if (expiry == OffsetDateTime.MIN) { + return true; + } + OffsetDateTime utcNow = OffsetDateTime.now(ZoneOffset.UTC); + return utcNow.until(expiry, SECONDS) <= minExpiryInSeconds; + } + + /** + * Parse the sasExpiry from the SAS token. The sasExpiry is the minimum + * of the ske and se parameters. The se parameter is required and the + * ske parameter is optional. + * @param token an Azure Storage SAS token + * @return the sasExpiry or OffsetDateTime.MIN if invalid. + */ + private static OffsetDateTime getExpiry(String token) { + // return MIN for all invalid input, including a null token + if (token == null) { + return OffsetDateTime.MIN; + } + + String signedExpiry = "se="; + int signedExpiryLen = 3; + + int start = token.indexOf(signedExpiry); + + // return MIN if the required se parameter is absent + if (start == -1) { + return OffsetDateTime.MIN; + } + + start += signedExpiryLen; + + // extract the value of se parameter + int end = token.indexOf("&", start); + String seValue = (end == -1) ? 
token.substring(start) : token.substring(start, end); + + try { + seValue = URLDecoder.decode(seValue, "utf-8"); + } catch (UnsupportedEncodingException ex) { + LOG.error("Error decoding se query parameter ({}) from SAS.", seValue, ex); + return OffsetDateTime.MIN; + } + + // parse the ISO 8601 date value; return MIN if invalid + OffsetDateTime seDate = OffsetDateTime.MIN; + try { + seDate = OffsetDateTime.parse(seValue, DateTimeFormatter.ISO_DATE_TIME); + } catch (DateTimeParseException ex) { + LOG.error("Error parsing se query parameter ({}) from SAS.", seValue, ex); + } + + String signedKeyExpiry = "ske="; + int signedKeyExpiryLen = 4; + + // if ske is present, the sasExpiry is the minimum of ske and se + start = token.indexOf(signedKeyExpiry); + + // return seDate if ske is absent + if (start == -1) { + return seDate; + } + + start += signedKeyExpiryLen; + + // extract the value of ske parameter + end = token.indexOf("&", start); + String skeValue = (end == -1) ? token.substring(start) : token.substring(start, end); + + try { + skeValue = URLDecoder.decode(skeValue, "utf-8"); + } catch (UnsupportedEncodingException ex) { + LOG.error("Error decoding ske query parameter ({}) from SAS.", skeValue, ex); + return OffsetDateTime.MIN; + } + + // parse the ISO 8601 date value; return MIN if invalid + OffsetDateTime skeDate = OffsetDateTime.MIN; + try { + skeDate = OffsetDateTime.parse(skeValue, DateTimeFormatter.ISO_DATE_TIME); + } catch (DateTimeParseException ex) { + LOG.error("Error parsing ske query parameter ({}) from SAS.", skeValue, ex); + return OffsetDateTime.MIN; + } + + return skeDate.isBefore(seDate) ? skeDate : seDate; + } + + /** + * Updates the cached SAS token and expiry. If the token is invalid, the cached value + * is cleared by setting it to null and the expiry to MIN. + * @param token an Azure Storage SAS token + */ + public void update(String token) { + // quickly return if token and cached sasToken are the same reference + // Note: use of operator == is intentional + if (token == sasToken) { + return; + } + OffsetDateTime newExpiry = getExpiry(token); + boolean isInvalid = isNearExpiry(newExpiry, minExpirationInSeconds); + synchronized (this) { + if (isInvalid) { + sasToken = null; + sasExpiry = OffsetDateTime.MIN; + } else { + sasToken = token; + sasExpiry = newExpiry; + } + } + } + + /** + * Gets the token if still valid. + * @return the token or null if it is expired or near sasExpiry. + */ + public String get() { + // quickly return null if not set + if (sasToken == null) { + return null; + } + String token; + OffsetDateTime exp; + synchronized (this) { + token = sasToken; + exp = sasExpiry; + } + boolean isInvalid = isNearExpiry(exp, minExpirationInSeconds); + return isInvalid ? null : token; + } + + @VisibleForTesting + void setForTesting(String token, OffsetDateTime expiry) { + synchronized (this) { + sasToken = token; + sasExpiry = expiry; + } + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/DateTimeUtils.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/DateTimeUtils.java new file mode 100644 index 0000000000000..0461869681252 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/DateTimeUtils.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
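One way a caller might combine get() and update() on the CachedSASToken above; the TokenSource here is hypothetical, and this is not how AbfsClient itself is wired, where the refreshed token comes back on the REST operation:

import org.apache.hadoop.fs.azurebfs.utils.CachedSASToken;

public class SasReuseSketch {
  /** Hypothetical token source standing in for a SASTokenProvider call. */
  interface TokenSource {
    String fetchFreshToken();
  }

  private final CachedSASToken cachedSasToken = new CachedSASToken(120);
  private final TokenSource source;

  public SasReuseSketch(TokenSource source) {
    this.source = source;
  }

  String tokenForNextRequest() {
    // get() returns null once the cached token is near its se/ske expiry,
    // at which point a fresh token is fetched and cached for later requests.
    String token = cachedSasToken.get();
    if (token == null) {
      token = source.fetchFreshToken();
      cachedSasToken.update(token);
    }
    return token;
  }
}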
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.time.Instant; +import java.util.Date; +import java.util.Locale; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS; + +public final class DateTimeUtils { + private static final Logger LOG = LoggerFactory.getLogger(DateTimeUtils.class); + private static final String DATE_TIME_PATTERN = "E, dd MMM yyyy HH:mm:ss z"; + + public static long parseLastModifiedTime(final String lastModifiedTime) { + long parsedTime = 0; + try { + Date utcDate = new SimpleDateFormat(DATE_TIME_PATTERN, Locale.US) + .parse(lastModifiedTime); + parsedTime = utcDate.getTime(); + } catch (ParseException e) { + LOG.error("Failed to parse the date {}", lastModifiedTime); + } finally { + return parsedTime; + } + } + + /** + * Tries to identify if an operation was recently executed based on the LMT of + * a file or folder. LMT needs to be more recent that the original request + * start time. To include any clock skew with server, LMT within + * DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS from the request start time is going + * to be considered to qualify for recent operation. + * @param lastModifiedTime File/Folder LMT + * @param expectedLMTUpdateTime original request timestamp which should + * have updated the LMT on target + * @return true if the LMT is within timespan for recent operation, else false + */ + public static boolean isRecentlyModified(final String lastModifiedTime, + final Instant expectedLMTUpdateTime) { + long lmtEpochTime = DateTimeUtils.parseLastModifiedTime(lastModifiedTime); + long currentEpochTime = expectedLMTUpdateTime.toEpochMilli(); + + return ((lmtEpochTime > currentEpochTime) + || ((currentEpochTime - lmtEpochTime) <= DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS)); + } + + private DateTimeUtils() { + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/IdentityHandler.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/IdentityHandler.java new file mode 100644 index 0000000000000..7f866925dfd7c --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/IdentityHandler.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.utils; + +import java.io.IOException; + + +/** + * {@code IdentityHandler} defines the set of methods to support various + * identity lookup services. + */ +public interface IdentityHandler { + + /** + * Perform lookup from Service Principal's Object ID to Username. + * @param originalIdentity AAD object ID. + * @return User name, if no name found returns empty string. + * */ + String lookupForLocalUserIdentity(String originalIdentity) throws IOException; + + /** + * Perform lookup from Security Group's Object ID to Security Group name. + * @param originalIdentity AAD object ID. + * @return Security group name, if no name found returns empty string. + * */ + String lookupForLocalGroupIdentity(String originalIdentity) throws IOException; +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TextFileBasedIdentityHandler.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TextFileBasedIdentityHandler.java new file mode 100644 index 0000000000000..523fd3179405a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TextFileBasedIdentityHandler.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.utils; + +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.LineIterator; + +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.COLON; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.HASH; + + +/** + * {@code TextFileBasedIdentityHandler} is a {@link IdentityHandler} implements + * translation operation which returns identity mapped to AAD identity by + * loading the mapping file from the configured location. Location of the + * mapping file should be configured in {@code core-site.xml}. + *

+ * User identity file should be delimited by colon in the below format.
+ * <pre>
+ * # OBJ_ID:USER_NAME:USER_ID:GROUP_ID:SPI_NAME:APP_ID
+ * </pre>
+ *
+ * Example:
+ * <pre>
+ * a2b27aec-77bd-46dd-8c8c-39611a333331:user1:11000:21000:spi-user1:abcf86e9-5a5b-49e2-a253-f5c9e2afd4ec
+ * </pre>
+ *
+ * Group identity file should be delimited by colon in the below format.
+ * <pre>
+ * # OBJ_ID:GROUP_NAME:GROUP_ID:SGP_NAME
+ * </pre>
+ *
+ * Example:
+ * <pre>
+ * 1d23024d-957c-4456-aac1-a57f9e2de914:group1:21000:sgp-group1
+ * </pre>
    + */ +public class TextFileBasedIdentityHandler implements IdentityHandler { + private static final Logger LOG = LoggerFactory.getLogger(TextFileBasedIdentityHandler.class); + + /** + * Expected no of fields in the user mapping file. + */ + private static final int NO_OF_FIELDS_USER_MAPPING = 6; + /** + * Expected no of fields in the group mapping file. + */ + private static final int NO_OF_FIELDS_GROUP_MAPPING = 4; + /** + * Array index for the local username. + * Example: + * a2b27aec-77bd-46dd-8c8c-39611a333331:user1:11000:21000:spi-user1:abcf86e9-5a5b-49e2-a253-f5c9e2afd4ec + */ + private static final int ARRAY_INDEX_FOR_LOCAL_USER_NAME = 1; + /** + * Array index for the security group name. + * Example: + * 1d23024d-957c-4456-aac1-a57f9e2de914:group1:21000:sgp-group1 + */ + private static final int ARRAY_INDEX_FOR_LOCAL_GROUP_NAME = 1; + /** + * Array index for the AAD Service Principal's Object ID. + */ + private static final int ARRAY_INDEX_FOR_AAD_SP_OBJECT_ID = 0; + /** + * Array index for the AAD Security Group's Object ID. + */ + private static final int ARRAY_INDEX_FOR_AAD_SG_OBJECT_ID = 0; + private String userMappingFileLocation; + private String groupMappingFileLocation; + private HashMap userMap; + private HashMap groupMap; + + public TextFileBasedIdentityHandler(String userMappingFilePath, String groupMappingFilePath) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(userMappingFilePath), + "Local User to Service Principal mapping filePath cannot by Null or Empty"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(groupMappingFilePath), + "Local Group to Security Group mapping filePath cannot by Null or Empty"); + this.userMappingFileLocation = userMappingFilePath; + this.groupMappingFileLocation = groupMappingFilePath; + //Lazy Loading + this.userMap = new HashMap<>(); + this.groupMap = new HashMap<>(); + } + + /** + * Perform lookup from Service Principal's Object ID to Local Username. + * @param originalIdentity AAD object ID. + * @return Local User name, if no name found or on exception, returns empty string. + * */ + public synchronized String lookupForLocalUserIdentity(String originalIdentity) throws IOException { + if(Strings.isNullOrEmpty(originalIdentity)) { + return EMPTY_STRING; + } + + if (userMap.size() == 0) { + loadMap(userMap, userMappingFileLocation, NO_OF_FIELDS_USER_MAPPING, ARRAY_INDEX_FOR_AAD_SP_OBJECT_ID); + } + + try { + String username = !Strings.isNullOrEmpty(userMap.get(originalIdentity)) + ? userMap.get(originalIdentity).split(COLON)[ARRAY_INDEX_FOR_LOCAL_USER_NAME] : EMPTY_STRING; + + return username; + } catch (ArrayIndexOutOfBoundsException e) { + LOG.error("Error while parsing the line, returning empty string", e); + return EMPTY_STRING; + } + } + + /** + * Perform lookup from Security Group's Object ID to Local Security Group name. + * @param originalIdentity AAD object ID. + * @return Local Security group name, if no name found or on exception, returns empty string. + * */ + public synchronized String lookupForLocalGroupIdentity(String originalIdentity) throws IOException { + if(Strings.isNullOrEmpty(originalIdentity)) { + return EMPTY_STRING; + } + + if (groupMap.size() == 0) { + loadMap(groupMap, groupMappingFileLocation, NO_OF_FIELDS_GROUP_MAPPING, + ARRAY_INDEX_FOR_AAD_SG_OBJECT_ID); + } + + try { + String groupname = + !Strings.isNullOrEmpty(groupMap.get(originalIdentity)) + ? 
groupMap.get(originalIdentity).split(COLON)[ARRAY_INDEX_FOR_LOCAL_GROUP_NAME] : EMPTY_STRING; + + return groupname; + } catch (ArrayIndexOutOfBoundsException e) { + LOG.error("Error while parsing the line, returning empty string", e); + return EMPTY_STRING; + } + } + + /** + * Creates the map from the file using the key index. + * @param cache Instance of cache object to store the data. + * @param fileLocation Location of the file to be loaded. + * @param keyIndex Index of the key from the data loaded from the key. + */ + private static void loadMap(HashMap cache, String fileLocation, int noOfFields, int keyIndex) + throws IOException { + LOG.debug("Loading identity map from file {}", fileLocation); + int errorRecord = 0; + File file = new File(fileLocation); + LineIterator it = null; + try { + it = FileUtils.lineIterator(file, "UTF-8"); + while (it.hasNext()) { + String line = it.nextLine(); + if (!Strings.isNullOrEmpty(line.trim()) && !line.startsWith(HASH)) { + if (line.split(COLON).length != noOfFields) { + errorRecord += 1; + continue; + } + cache.put(line.split(COLON)[keyIndex], line); + } + } + LOG.debug("Loaded map stats - File: {}, Loaded: {}, Error: {} ", fileLocation, cache.size(), errorRecord); + } catch (ArrayIndexOutOfBoundsException e) { + LOG.error("Error while parsing mapping file", e); + } finally { + LineIterator.closeQuietly(it); + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md index 01c1fbd03b35b..33d4a0fa428a0 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md @@ -257,7 +257,8 @@ will have the URL `abfs://container1@abfswales1.dfs.core.windows.net/` You can create a new container through the ABFS connector, by setting the option - `fs.azure.createRemoteFileSystemDuringInitialization` to `true`. + `fs.azure.createRemoteFileSystemDuringInitialization` to `true`. Though the + same is not supported when AuthType is SAS. If the container does not exist, an attempt to list it with `hadoop fs -ls` will fail @@ -313,16 +314,38 @@ driven by them. 1. Using OAuth 2.0 tokens of one form or another. 1. Deployed in-Azure with the Azure VMs providing OAuth 2.0 tokens to the application, "Managed Instance". +1. Using Shared Access Signature (SAS) tokens provided by a custom implementation of the SASTokenProvider interface. What can be changed is what secrets/credentials are used to authenticate the caller. -The authentication mechanism is set in `fs.azure.account.auth.type` (or the account specific variant), -and, for the various OAuth options `fs.azure.account.oauth.provider.type` +The authentication mechanism is set in `fs.azure.account.auth.type` (or the +account specific variant). The possible values are SharedKey, OAuth, Custom +and SAS. For the various OAuth options use the config `fs.azure.account +.oauth.provider.type`. Following are the implementations supported +ClientCredsTokenProvider, UserPasswordTokenProvider, MsiTokenProvider and +RefreshTokenBasedTokenProvider. An IllegalArgumentException is thrown if +the specified provider type is not one of the supported. All secrets can be stored in JCEKS files. These are encrypted and password protected —use them or a compatible Hadoop Key Management Store wherever possible +### AAD Token fetch retries + +The exponential retry policy used for the AAD token fetch retries can be tuned +with the following configurations. 
+* `fs.azure.oauth.token.fetch.retry.max.retries`: Sets the maximum number of + retries. Default value is 5. +* `fs.azure.oauth.token.fetch.retry.min.backoff.interval`: Minimum back-off + interval. Added to the retry interval computed from delta backoff. By + default this si set as 0. Set the interval in milli seconds. +* `fs.azure.oauth.token.fetch.retry.max.backoff.interval`: Maximum back-off +interval. Default value is 60000 (sixty seconds). Set the interval in milli +seconds. +* `fs.azure.oauth.token.fetch.retry.delta.backoff`: Back-off interval between +retries. Multiples of this timespan are used for subsequent retry attempts + . The default value is 2. + ### Default: Shared Key This is the simplest authentication mechanism of account + password. @@ -349,6 +372,15 @@ the password, "key", retrieved from the XML/JCECKs configuration files. *Note*: The source of the account key can be changed through a custom key provider; one exists to execute a shell script to retrieve it. +A custom key provider class can be provided with the config +`fs.azure.account.keyprovider`. If a key provider class is specified the same +will be used to get account key. Otherwise the Simple key provider will be used +which will use the key specified for the config `fs.azure.account.key`. + +To retrieve using shell script, specify the path to the script for the config +`fs.azure.shellkeyprovider.script`. ShellDecryptionKeyProvider class use the +script specified to retrieve the key. + ### OAuth 2.0 Client Credentials OAuth 2.0 credentials of (client id, client secret, endpoint) are provided in the configuration/JCEKS file. @@ -464,6 +496,13 @@ With an existing Oauth 2.0 token, make a request of the Active Directory endpoin Refresh token + + fs.azure.account.oauth2.refresh.endpoint + + + Refresh token endpoint + + fs.azure.account.oauth2.client.id @@ -505,6 +544,13 @@ The Azure Portal/CLI is used to create the service identity. Optional MSI Tenant ID + + fs.azure.account.oauth2.msi.endpoint + + + MSI endpoint + + fs.azure.account.oauth2.client.id @@ -539,6 +585,46 @@ token when its `getAccessToken()` method is invoked. The declared class must implement `org.apache.hadoop.fs.azurebfs.extensions.CustomTokenProviderAdaptee` and optionally `org.apache.hadoop.fs.azurebfs.extensions.BoundDTExtension`. +The declared class also holds responsibility to implement retry logic while fetching access tokens. + +### Delegation Token Provider + +A delegation token provider supplies the ABFS connector with delegation tokens, +helps renew and cancel the tokens by implementing the +CustomDelegationTokenManager interface. + +```xml + + fs.azure.enable.delegation.token + true + Make this true to use delegation token provider + + + fs.azure.delegation.token.provider.type + {fully-qualified-class-name-for-implementation-of-CustomDelegationTokenManager-interface} + +``` +In case delegation token is enabled, and the config `fs.azure.delegation.token +.provider.type` is not provided then an IlleagalArgumentException is thrown. + +### Shared Access Signature (SAS) Token Provider + +A Shared Access Signature (SAS) token provider supplies the ABFS connector with SAS +tokens by implementing the SASTokenProvider interface. + +```xml + + fs.azure.account.auth.type + SAS + + + fs.azure.sas.token.provider.type + {fully-qualified-class-name-for-implementation-of-SASTokenProvider-interface} + +``` + +The declared class must implement `org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider`. 
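+
+As an illustration only, the following is a minimal sketch of such a provider.
+The method names shown (`initialize`, `getSASToken`), the configuration key
+`fs.azure.test.fixed.sas.token` and the package name are assumptions made for
+this example, not a verbatim copy of the interface; check the
+`SASTokenProvider` source for the exact contract before implementing it.
+
+```java
+// Sketch of a SAS token provider that always returns one pre-generated SAS.
+// Assumes the SASTokenProvider interface exposes initialize() and getSASToken();
+// verify the real signatures in org.apache.hadoop.fs.azurebfs.extensions.
+package com.example.abfs; // hypothetical package
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider;
+
+public class FixedSASTokenProvider implements SASTokenProvider {
+  private String sasToken;
+
+  @Override
+  public void initialize(Configuration configuration, String accountName)
+      throws IOException {
+    // Hypothetical key: a pre-generated SAS read from the configuration.
+    sasToken = configuration.get("fs.azure.test.fixed.sas.token");
+  }
+
+  @Override
+  public String getSASToken(String account, String fileSystem, String path,
+      String operation) throws IOException {
+    // Returns the same SAS for every path and operation; a real provider
+    // would mint a token scoped to the path and operation requested.
+    return sasToken;
+  }
+}
+```
+
+The fully qualified name of such a class would then be set as the value of
+`fs.azure.sas.token.provider.type`, as shown in the configuration above.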
+ ## Technical notes ### Proxy setup @@ -661,10 +747,146 @@ Hflush() being the only documented API that can provide persistent data transfer, Flush() also attempting to persist buffered data will lead to performance issues. +### HNS Check Options +Config `fs.azure.account.hns.enabled` provides an option to specify whether + the storage account is HNS enabled or not. In case the config is not provided, + a server call is made to check the same. + ### Access Options Config `fs.azure.enable.check.access` needs to be set true to enable the AzureBlobFileSystem.access(). +### Operation Idempotency + +Requests failing due to server timeouts and network failures will be retried. +PUT/POST operations are idempotent and need no specific handling +except for Rename and Delete operations. + +Rename idempotency checks are made by ensuring the LastModifiedTime on destination +is recent if source path is found to be non-existent on retry. + +Delete is considered to be idempotent by default if the target does not exist on +retry. + +### Primary User Group Options +The group name which is part of FileStatus and AclStatus will be set the same as +the username if the following config is set to true +`fs.azure.skipUserGroupMetadataDuringInitialization`. + +### IO Options +The following configs are related to read and write operations. + +`fs.azure.io.retry.max.retries`: Sets the number of retries for IO operations. +Currently this is used only for the server call retry logic. Used within +AbfsClient class as part of the ExponentialRetryPolicy. The value should be +greater than or equal to 0. + +`fs.azure.write.request.size`: To set the write buffer size. Specify the value +in bytes. The value should be between 16384 to 104857600 both inclusive (16 KB +to 100 MB). The default value will be 8388608 (8 MB). + +`fs.azure.read.request.size`: To set the read buffer size.Specify the value in +bytes. The value should be between 16384 to 104857600 both inclusive (16 KB to +100 MB). The default value will be 4194304 (4 MB). + +`fs.azure.read.alwaysReadBufferSize`: Read request size configured by +`fs.azure.read.request.size` will be honoured only when the reads done are in +sequential pattern. When the read pattern is detected to be random, read size +will be same as the buffer length provided by the calling process. +This config when set to true will force random reads to also read in same +request sizes as sequential reads. This is a means to have same read patterns +as of ADLS Gen1, as it does not differentiate read patterns and always reads by +the configured read request size. The default value for this config will be +false, where reads for the provided buffer length is done when random read +pattern is detected. + +`fs.azure.readaheadqueue.depth`: Sets the readahead queue depth in +AbfsInputStream. In case the set value is negative the read ahead queue depth +will be set as Runtime.getRuntime().availableProcessors(). By default the value +will be -1. To disable readaheads, set this value to 0. If your workload is + doing only random reads (non-sequential) or you are seeing throttling, you + may try setting this value to 0. + +`fs.azure.read.readahead.blocksize`: To set the read buffer size for the read +aheads. Specify the value in bytes. The value should be between 16384 to +104857600 both inclusive (16 KB to 100 MB). The default value will be +4194304 (4 MB). + +`fs.azure.buffered.pread.disable`: By default the positional read API will do a +seek and read on input stream. 
This read will fill the buffer cache in +AbfsInputStream and update the cursor positions. If this optimization is true +it will skip usage of buffer and do a lock free REST call for reading from blob. +This optimization is very much helpful for HBase kind of short random read over +a shared AbfsInputStream instance. +Note: This is not a config which can be set at cluster level. It can be used as +an option on FutureDataInputStreamBuilder. +See FileSystem#openFile(Path path) + +To run under limited memory situations configure the following. Especially +when there are too many writes from the same process. + +`fs.azure.write.max.concurrent.requests`: To set the maximum concurrent + write requests from an AbfsOutputStream instance to server at any point of + time. Effectively this will be the threadpool size within the + AbfsOutputStream instance. Set the value in between 1 to 8 both inclusive. + +`fs.azure.write.max.requests.to.queue`: To set the maximum write requests + that can be queued. Memory consumption of AbfsOutputStream instance can be + tuned with this config considering each queued request holds a buffer. Set + the value 3 or 4 times the value set for s.azure.write.max.concurrent.requests. + +### Security Options +`fs.azure.always.use.https`: Enforces to use HTTPS instead of HTTP when the flag +is made true. Irrespective of the flag, AbfsClient will use HTTPS if the secure +scheme (ABFSS) is used or OAuth is used for authentication. By default this will +be set to true. + +`fs.azure.ssl.channel.mode`: Initializing DelegatingSSLSocketFactory with the +specified SSL channel mode. Value should be of the enum +DelegatingSSLSocketFactory.SSLChannelMode. The default value will be +DelegatingSSLSocketFactory.SSLChannelMode.Default. + +### Server Options +When the config `fs.azure.io.read.tolerate.concurrent.append` is made true, the +If-Match header sent to the server for read calls will be set as * otherwise the +same will be set with ETag. This is basically a mechanism in place to handle the +reads with optimistic concurrency. +Please refer the following links for further information. +1. https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/read +2. https://azure.microsoft.com/de-de/blog/managing-concurrency-in-microsoft-azure-storage-2/ + +listStatus API fetches the FileStatus information from server in a page by page +manner. The config `fs.azure.list.max.results` used to set the maxResults URI + param which sets the pagesize(maximum results per call). The value should + be > 0. By default this will be 5000. Server has a maximum value for this + parameter as 5000. So even if the config is above 5000 the response will only +contain 5000 entries. Please refer the following link for further information. +https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/list + +### Throttling Options +ABFS driver has the capability to throttle read and write operations to achieve +maximum throughput by minimizing errors. The errors occur when the account +ingress or egress limits are exceeded and, the server-side throttles requests. +Server-side throttling causes the retry policy to be used, but the retry policy +sleeps for long periods of time causing the total ingress or egress throughput +to be as much as 35% lower than optimal. The retry policy is also after the +fact, in that it applies after a request fails. 
On the other hand, the +client-side throttling implemented here happens before requests are made and +sleeps just enough to minimize errors, allowing optimal ingress and/or egress +throughput. By default the throttling mechanism is enabled in the driver. The +same can be disabled by setting the config `fs.azure.enable.autothrottling` +to false. + +### Rename Options +`fs.azure.atomic.rename.key`: Directories for atomic rename support can be +specified comma separated in this config. The driver prints the following +warning log if the source of the rename belongs to one of the configured +directories. "The atomic rename feature is not supported by the ABFS scheme +; however, rename, create and delete operations are atomic if Namespace is +enabled for your Azure Storage account." +The directories can be specified as comma separated values. By default the value +is "/hbase" + ### Perf Options #### 1. HTTP Request Tracking Options diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md index a26da839f0605..cf3b2344456af 100644 --- a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md +++ b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md @@ -592,6 +592,61 @@ with the Hadoop Distributed File System permissions model when hierarchical namespace is enabled for the storage account. Furthermore, the metadata and data produced by ADLS Gen 2 REST API can be consumed by Blob REST API, and vice versa. +## Generating test run configurations and test triggers over various config combinations + +To simplify the testing across various authentication and features combinations +that are mandatory for a PR, script `dev-support/testrun-scripts/runtests.sh` +should be used. Once the script is updated with relevant config settings for +various test combinations, it will: +1. Auto-generate configs specific to each test combinations +2. Run tests for all combinations +3. Summarize results across all the test combination runs. + +As a pre-requiste step, fill config values for test accounts and credentials +needed for authentication in `src/test/resources/azure-auth-keys.xml.template` +and rename as `src/test/resources/azure-auth-keys.xml`. + +**To add a new test combination:** Templates for mandatory test combinations +for PR validation are present in `dev-support/testrun-scripts/runtests.sh`. +If a new one needs to be added, add a combination set within +`dev-support/testrun-scripts/runtests.sh` similar to the ones already defined +and +1. Provide a new combination name +2. Update properties and values array which need to be effective for the test +combination +3. Call generateconfigs + +**To run PR validation:** Running command +* `dev-support/testrun-scripts/runtests.sh` will generate configurations for +each of the combinations defined and run tests for all the combinations. +* `dev-support/testrun-scripts/runtests.sh -c {combinationname}` Specific +combinations can be provided with -c option. If combinations are provided +with -c option, tests for only those combinations will be run. + +**Test logs:** Test runs will create a folder within dev-support/testlogs to +save the test logs. Folder name will be the test start timestamp. The mvn verify +command line logs for each combination will be saved into a file as +Test-Logs-$combination.txt into this folder. In case of any failures, this file +will have the failure exception stack. 
At the end of the test run, the +consolidated results of all the combination runs will be saved into a file as +Test-Results.log in the same folder. When run for PR validation, the +consolidated test results needs to be pasted into the PR comment section. + +**To generate config for use in IDE:** Running command with -a (activate) option +`dev-support/testrun-scripts/runtests.sh -a {combination name}` will update +the effective config relevant for the specific test combination. Hence the same +config files used by the mvn test runs can be used for IDE without any manual +updates needed within config file. + +**Other command line options:** +* -a Specify the combination name which needs to be +activated. This is to be used to generate config for use in IDE. +* -c Specify the combination name for test runs. If this +config is specified, tests for only the specified combinations will run. All +combinations of tests will be running if this config is not specified. +* -t ABFS mvn tests are run in parallel mode. Tests by default +are run with 8 thread count. It can be changed by providing -t + In order to test ABFS, please add the following configuration to your `src/test/resources/azure-auth-keys.xml` file. Note that the ABFS tests include compatibility tests which require WASB credentials, in addition to the ABFS @@ -646,7 +701,7 @@ hierarchical namespace enabled, and set the following configuration settings: fs.azure.account.auth.type.{YOUR_ABFS_ACCOUNT_NAME} {AUTH TYPE} - The authorization type can be SharedKey, OAuth, or Custom. The + The authorization type can be SharedKey, OAuth, Custom or SAS. The default is SharedKey. @@ -793,6 +848,136 @@ hierarchical namespace enabled, and set the following configuration settings: --> +``` +To run Delegation SAS test cases you must use a storage account with the +hierarchical namespace enabled and set the following configuration settings: + +```xml + + + + + + fs.azure.sas.token.provider.type + org.apache.hadoop.fs.azurebfs.extensions.MockDelegationSASTokenProvider + The fully qualified class name of the SAS token provider implementation. + + + + fs.azure.test.app.service.principal.tenant.id + {TID} + Tenant ID for the application's service principal. + + + + fs.azure.test.app.service.principal.object.id + {OID} + Object ID for the application's service principal. + + + + fs.azure.test.app.id + {app id} + The application's ID, also known as the client id. + + + + fs.azure.test.app.secret + {client secret} + The application's secret, also known as the client secret. + + + +``` + +To run CheckAccess test cases you must register an app with no RBAC and set +the following configurations. +```xml + + + + + fs.azure.enable.check.access + true + By default the check access will be on. Checkaccess can + be turned off by changing this flag to false. + + + fs.azure.account.test.oauth2.client.id + {client id} + The client id(app id) for the app created on step 1 + + + + fs.azure.account.test.oauth2.client.secret + {client secret} + +The client secret(application's secret) for the app created on step 1 + + + + fs.azure.check.access.testuser.guid + {guid} + The guid fetched on step 2 + + + fs.azure.account.oauth2.client.endpoint.{account name}.dfs.core +.windows.net + https://login.microsoftonline.com/{TENANTID}/oauth2/token + +Token end point. This can be found through Azure portal. As part of CheckAccess +test cases. The access will be tested for an FS instance created with the +above mentioned client credentials. 
So this configuration is necessary to +create the test FS instance. + + + ``` If running tests against an endpoint that uses the URL format diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java index 1c868ea0ff1e6..2c99b84394f82 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Assert; @@ -130,15 +131,56 @@ public void testConcurrentDeleteFile() throws Exception { } } + /** + * Validate the bug fix for HADOOP-17089. Please note that we were never + * able to reproduce this except during a Spark job that ran for multiple days + * and in a hacked-up azure-storage SDK that added sleep before and after + * the call to factory.setNamespaceAware(true) as shown in the description of + * + * @see https://github.com/Azure/azure-storage-java/pull/546 + */ + @Test(timeout = TEST_EXECUTION_TIMEOUT) + public void testConcurrentList() throws Exception { + final Path testDir = new Path("/tmp/data-loss/11230174258112/_temporary/0/_temporary/attempt_20200624190514_0006_m_0"); + final Path testFile = new Path(testDir, "part-00004-15ea87b1-312c-4fdf-1820-95afb3dfc1c3-a010.snappy.parquet"); + fs.create(testFile).close(); + List tasks = new ArrayList<>(THREAD_COUNT); + + for (int i = 0; i < THREAD_COUNT; i++) { + tasks.add(new ListTask(fs, testDir)); + } + + ExecutorService es = null; + try { + es = Executors.newFixedThreadPool(THREAD_COUNT); + + List> futures = es.invokeAll(tasks); + + for (Future future : futures) { + Assert.assertTrue(future.isDone()); + + // we are using Callable, so if an exception + // occurred during the operation, it will be thrown + // when we call get + long fileCount = future.get(); + assertEquals("The list should always contain 1 file.", 1, fileCount); + } + } finally { + if (es != null) { + es.shutdownNow(); + } + } + } + abstract class FileSystemTask implements Callable { private final FileSystem fileSystem; private final Path path; - protected FileSystem getFileSystem() { + FileSystem getFileSystem() { return this.fileSystem; } - protected Path getFilePath() { + Path getFilePath() { return this.path; } @@ -182,4 +224,17 @@ public Void call() throws Exception { return null; } } + + class ListTask extends FileSystemTask { + ListTask(FileSystem fs, Path p) { + super(fs, p); + } + + public Integer call() throws Exception { + FileSystem fs = getFileSystem(); + Path p = getFilePath(); + FileStatus[] files = fs.listStatus(p); + return files.length; + } + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java index b8edc4b7d6586..835b82c3c1924 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java @@ -27,8 +27,6 @@ import com.microsoft.azure.storage.blob.BlockListingFilter; import 
com.microsoft.azure.storage.blob.CloudBlockBlob; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -39,8 +37,9 @@ import org.hamcrest.core.IsNot; import org.junit.Test; -import static org.junit.Assert.*; -import static org.junit.Assume.assumeNotNull; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertCapabilities; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasStreamCapabilities; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertLacksStreamCapabilities; /** * Test semantics of functions flush, hflush, hsync, and close for block blobs, @@ -192,11 +191,14 @@ public void testPageBlobClose() throws IOException { public void testPageBlobCapabilities() throws IOException { Path path = getBlobPathWithTestName(PAGE_BLOB_DIR); try (FSDataOutputStream stream = fs.create(path)) { - assertTrue(stream.hasCapability(StreamCapabilities.HFLUSH)); - assertTrue(stream.hasCapability(StreamCapabilities.HSYNC)); - assertFalse(stream.hasCapability(StreamCapabilities.DROPBEHIND)); - assertFalse(stream.hasCapability(StreamCapabilities.READAHEAD)); - assertFalse(stream.hasCapability(StreamCapabilities.UNBUFFER)); + assertCapabilities(stream, + new String[]{ + StreamCapabilities.HFLUSH, + StreamCapabilities.HSYNC, + StreamCapabilities.DROPBEHIND, + StreamCapabilities.READAHEAD, + StreamCapabilities.UNBUFFER}, + null); stream.write(getRandomBytes()); } } @@ -285,11 +287,12 @@ public void testBlockBlobClose() throws IOException { public void testBlockBlobCapabilities() throws IOException { Path path = getBlobPathWithTestName(BLOCK_BLOB_DIR); try (FSDataOutputStream stream = fs.create(path)) { - assertFalse(stream.hasCapability(StreamCapabilities.HFLUSH)); - assertFalse(stream.hasCapability(StreamCapabilities.HSYNC)); - assertFalse(stream.hasCapability(StreamCapabilities.DROPBEHIND)); - assertFalse(stream.hasCapability(StreamCapabilities.READAHEAD)); - assertFalse(stream.hasCapability(StreamCapabilities.UNBUFFER)); + assertLacksStreamCapabilities(stream, + StreamCapabilities.HFLUSH, + StreamCapabilities.HSYNC, + StreamCapabilities.DROPBEHIND, + StreamCapabilities.READAHEAD, + StreamCapabilities.UNBUFFER); stream.write(getRandomBytes()); } } @@ -381,11 +384,12 @@ public void testBlockBlobCompactionClose() throws IOException { public void testBlockBlobCompactionCapabilities() throws IOException { Path path = getBlobPathWithTestName(BLOCK_BLOB_COMPACTION_DIR); try (FSDataOutputStream stream = fs.create(path)) { - assertTrue(stream.hasCapability(StreamCapabilities.HFLUSH)); - assertTrue(stream.hasCapability(StreamCapabilities.HSYNC)); - assertFalse(stream.hasCapability(StreamCapabilities.DROPBEHIND)); - assertFalse(stream.hasCapability(StreamCapabilities.READAHEAD)); - assertFalse(stream.hasCapability(StreamCapabilities.UNBUFFER)); + assertHasStreamCapabilities(stream, + StreamCapabilities.HFLUSH, + StreamCapabilities.HSYNC, + StreamCapabilities.DROPBEHIND, + StreamCapabilities.READAHEAD, + StreamCapabilities.UNBUFFER); stream.write(getRandomBytes()); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestPageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestPageBlobOutputStream.java new file mode 100644 index 0000000000000..416143d3f0add --- /dev/null +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestPageBlobOutputStream.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azure; + +import java.io.IOException; +import java.util.EnumSet; + +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.LambdaTestUtils; + +public class ITestPageBlobOutputStream extends AbstractWasbTestBase { + + private static final Path TEST_FILE_PATH = new Path( + "TestPageBlobOutputStream.txt"); + + @Override + protected AzureBlobStorageTestAccount createTestAccount() throws Exception { + Configuration conf = new Configuration(); + // Configure the page blob directories key so every file created is a page + // blob. + conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/"); + return AzureBlobStorageTestAccount.create("testpagebloboutputstream", + EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer), + conf, true); + } + + @Test + public void testHflush() throws Exception { + Path path = fs.makeQualified(TEST_FILE_PATH); + FSDataOutputStream os = fs.create(path); + os.write(1); + os.hflush(); + // Delete the blob so that Azure call will fail. + fs.delete(path, false); + os.write(2); + LambdaTestUtils.intercept(IOException.class, + "The specified blob does not exist", () -> { + os.hflush(); + }); + LambdaTestUtils.intercept(IOException.class, + "The specified blob does not exist", () -> { + os.close(); + }); + } + + @Test + public void testHsync() throws Exception { + Path path = fs.makeQualified(TEST_FILE_PATH); + FSDataOutputStream os = fs.create(path); + os.write(1); + os.hsync(); + // Delete the blob so that Azure call will fail. + fs.delete(path, false); + os.write(2); + LambdaTestUtils.intercept(IOException.class, + "The specified blob does not exist", () -> { + os.hsync(); + }); + LambdaTestUtils.intercept(IOException.class, + "The specified blob does not exist", () -> { + os.close(); + }); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java index 982e92bb31a2f..7398e521bc51b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java @@ -655,9 +655,7 @@ public void testCanonicalServiceName() throws Exception { // because the mock container does not exist, this call is expected to fail. 
intercept(IllegalArgumentException.class, "java.net.UnknownHostException", - () -> { - fs0.getCanonicalServiceName(); - }); + () -> fs0.getCanonicalServiceName()); conf.setBoolean(RETURN_URI_AS_CANONICAL_SERVICE_NAME_PROPERTY_NAME, true); FileSystem fs1 = FileSystem.newInstance(defaultUri, conf); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/InMemoryBlockBlobStore.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/InMemoryBlockBlobStore.java index b8971c488c45a..7ddeabe242ef6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/InMemoryBlockBlobStore.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/InMemoryBlockBlobStore.java @@ -25,6 +25,8 @@ import java.util.HashMap; import java.util.Map; +import static java.util.Objects.requireNonNull; + /** * A simple memory key-value store to help mock the Windows Azure Storage * implementation for unit testing. @@ -163,7 +165,10 @@ public synchronized boolean exists(String key) { @SuppressWarnings("unchecked") public synchronized HashMap getMetadata(String key) { - return (HashMap) blobs.get(key).metadata.clone(); + Entry entry = requireNonNull(blobs.get(key), "entry for " + key); + return (HashMap) requireNonNull(entry.metadata, + "metadata for " + key) + .clone(); } public synchronized HashMap getContainerMetadata() { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java index 1739cff76d400..6d11207c479b5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java @@ -37,6 +37,7 @@ import org.apache.commons.codec.DecoderException; import org.apache.commons.codec.net.URLCodec; import org.apache.commons.lang3.NotImplementedException; +import org.apache.hadoop.fs.Path; import org.apache.http.client.utils.URIBuilder; import com.microsoft.azure.storage.AccessCondition; @@ -137,9 +138,20 @@ private static String convertUriToDecodedString(URI uri) { private static URI convertKeyToEncodedUri(String key) { try { - return new URIBuilder().setPath(key).build(); + Path p = new Path(key); + URI unEncodedURI = p.toUri(); + return new URIBuilder().setPath(unEncodedURI.getPath()) + .setScheme(unEncodedURI.getScheme()).build(); } catch (URISyntaxException e) { - throw new AssertionError("Failed to encode key: " + key); + int i = e.getIndex(); + String details; + if (i >= 0) { + details = " -- \"" + e.getInput().charAt(i) + "\""; + } else { + details = ""; + } + throw new AssertionError("Failed to encode key: " + key + + ": " + e + details); } } @@ -148,8 +160,8 @@ public CloudBlobContainerWrapper getContainerReference(String name) throws URISyntaxException, StorageException { String fullUri; URIBuilder builder = new URIBuilder(baseUriString); - fullUri = builder.setPath(builder.getPath() + "/" + name).toString(); - + String path = builder.getPath() == null ? 
"" : builder.getPath() + "/"; + fullUri = builder.setPath(path + name).toString(); MockCloudBlobContainerWrapper container = new MockCloudBlobContainerWrapper( fullUri, name); // Check if we have a pre-existing container with that name, and prime diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java index 30c102839cb1e..832e7ec05a0af 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobMetadata.java @@ -202,8 +202,10 @@ public void testPermissionMetadata() throws Exception { Path selfishFile = new Path("/noOneElse"); fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(), fs.getDefaultBlockSize(), null).close(); + String mockUri = AzureBlobStorageTestAccount.toMockUri(selfishFile); + assertNotNull("converted URI", mockUri); HashMap metadata = backingStore - .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile)); + .getMetadata(mockUri); assertNotNull(metadata); String storedPermission = metadata.get("hdi_permission"); assertEquals(getExpectedPermissionString("rw-------"), storedPermission); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java index 4c618275e7e36..2f1c90286571b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java @@ -45,7 +45,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE; import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestSyncableDataOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestSyncableDataOutputStream.java new file mode 100644 index 0000000000000..c8c6d93f49d9a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestSyncableDataOutputStream.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.azure; + +import java.io.IOException; +import java.io.OutputStream; + +import org.junit.Test; + +import org.apache.hadoop.test.LambdaTestUtils; + +public class TestSyncableDataOutputStream { + + @Test + public void testCloseWhenFlushThrowingIOException() throws Exception { + MockOutputStream out = new MockOutputStream(); + SyncableDataOutputStream sdos = new SyncableDataOutputStream(out); + out.flushThrowIOE = true; + LambdaTestUtils.intercept(IOException.class, "An IOE from flush", () -> sdos.close()); + MockOutputStream out2 = new MockOutputStream(); + out2.flushThrowIOE = true; + LambdaTestUtils.intercept(IOException.class, "An IOE from flush", () -> { + try (SyncableDataOutputStream sdos2 = new SyncableDataOutputStream(out2)) { + } + }); + } + + private static class MockOutputStream extends OutputStream { + + private boolean flushThrowIOE = false; + private IOException lastException = null; + + @Override + public void write(int arg0) throws IOException { + + } + + @Override + public void flush() throws IOException { + if (this.flushThrowIOE) { + this.lastException = new IOException("An IOE from flush"); + throw this.lastException; + } + } + + @Override + public void close() throws IOException { + if (this.lastException != null) { + throw this.lastException; + } + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index a42648fc18597..7b3b5c109918c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -21,10 +21,12 @@ import java.io.IOException; import java.net.URI; import java.util.Hashtable; +import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +35,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore; import org.apache.hadoop.fs.azure.NativeAzureFileSystem; @@ -43,6 +47,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.utils.UriUtils; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import static org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.WASB_ACCOUNT_NAME_DOMAIN_SUFFIX; @@ -75,6 +80,7 @@ public abstract class AbstractAbfsIntegrationTest extends private String testUrl; private AuthType authType; private boolean useConfiguredFileSystem = false; + private boolean usingFilesystemForSASTests = false; protected AbstractAbfsIntegrationTest() throws Exception { fileSystemName = TEST_CONTAINER_PREFIX + UUID.randomUUID().toString(); @@ -116,6 +122,10 @@ protected AbstractAbfsIntegrationTest() throws Exception { this.testUrl = 
defaultUri.toString(); abfsConfig.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultUri.toString()); abfsConfig.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true); + if (abfsConfig.get(FS_AZURE_TEST_APPENDBLOB_ENABLED) == "true") { + String appendblobDirs = this.testUrl + "," + abfsConfig.get(FS_AZURE_CONTRACT_TEST_URI); + rawConfig.set(FS_AZURE_APPEND_BLOB_KEY, appendblobDirs); + } // For testing purposes, an IP address and port may be provided to override // the host specified in the FileSystem URI. Also note that the format of // the Azure Storage Service URI changes from @@ -171,8 +181,13 @@ public void teardown() throws Exception { return; } - // Delete all uniquely created filesystem from the account - if (!useConfiguredFileSystem) { + if (usingFilesystemForSASTests) { + abfsConfig.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey.name()); + AzureBlobFileSystem tempFs = (AzureBlobFileSystem) FileSystem.newInstance(rawConfig); + tempFs.getAbfsStore().deleteFilesystem(); + } + else if (!useConfiguredFileSystem) { + // Delete all uniquely created filesystem from the account final AzureBlobFileSystemStore abfsStore = abfs.getAbfsStore(); abfsStore.deleteFilesystem(); @@ -221,6 +236,16 @@ public void loadConfiguredFileSystem() throws Exception { useConfiguredFileSystem = true; } + protected void createFilesystemForSASTests() throws Exception { + // The SAS tests do not have permission to create a filesystem + // so first create temporary instance of the filesystem using SharedKey + // then re-use the filesystem it creates with SAS auth instead of SharedKey. + AzureBlobFileSystem tempFs = (AzureBlobFileSystem) FileSystem.newInstance(rawConfig); + Assert.assertTrue(tempFs.exists(new Path("/"))); + abfsConfig.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SAS.name()); + usingFilesystemForSASTests = true; + } + public AzureBlobFileSystem getFileSystem() throws IOException { return abfs; } @@ -270,6 +295,11 @@ protected String getTestUrl() { protected void setFileSystemName(String fileSystemName) { this.fileSystemName = fileSystemName; } + + protected String getMethodName() { + return methodName.getMethodName(); + } + protected String getFileSystemName() { return fileSystemName; } @@ -362,6 +392,14 @@ public Path getTestPath() { return path; } + public AzureBlobFileSystemStore getAbfsStore(final AzureBlobFileSystem fs) { + return fs.getAbfsStore(); + } + + public Path makeQualified(Path path) throws java.io.IOException { + return getFileSystem().makeQualified(path); + } + /** * Create a path under the test path provided by * {@link #getTestPath()}. @@ -383,4 +421,37 @@ protected AbfsDelegationTokenManager getDelegationTokenManager() throws IOException { return getFileSystem().getDelegationTokenManager(); } + + /** + * Generic create File and enabling AbfsOutputStream Flush. + * + * @param fs AzureBlobFileSystem that is initialised in the test. + * @param path Path of the file to be created. + * @return AbfsOutputStream for writing. 
+ * @throws AzureBlobFileSystemException + */ + protected AbfsOutputStream createAbfsOutputStreamWithFlushEnabled( + AzureBlobFileSystem fs, + Path path) throws AzureBlobFileSystemException { + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + abfss.getAbfsConfiguration().setDisableOutputStreamFlush(false); + + return (AbfsOutputStream) abfss.createFile(path, fs.getFsStatistics(), + true, FsPermission.getDefault(), FsPermission.getUMask(fs.getConf())); + } + + /** + * Custom assertion for AbfsStatistics which have statistics, expected + * value and map of statistics and value as its parameters. + * @param statistic the AbfsStatistics which needs to be asserted. + * @param expectedValue the expected value of the statistics. + * @param metricMap map of (String, Long) with statistics name as key and + * statistics value as map value. + */ + protected long assertAbfsStatistics(AbfsStatistic statistic, + long expectedValue, Map metricMap) { + assertEquals("Mismatch in " + statistic.getStatName(), expectedValue, + (long) metricMap.get(statistic.getStatName())); + return expectedValue; + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java index fee90abeabc9e..0485422871ecc 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.fs.azurebfs; +import java.io.IOException; + import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.rules.TestName; import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.Path; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_TIMEOUT; @@ -31,6 +38,9 @@ * This class does not attempt to bind to Azure. */ public class AbstractAbfsTestWithTimeout extends Assert { + private static final Logger LOG = + LoggerFactory.getLogger(AbstractAbfsTestWithTimeout.class); + /** * The name of the current method. */ @@ -67,4 +77,53 @@ public void nameThread() { protected int getTestTimeoutMillis() { return TEST_TIMEOUT; } + + /** + * Describe a test in the logs. + * + * @param text text to print + * @param args arguments to format in the printing + */ + protected void describe(String text, Object... args) { + LOG.info("\n\n{}: {}\n", + methodName.getMethodName(), + String.format(text, args)); + } + + /** + * Validate Contents written on a file in Abfs. 
+ * + * @param fs AzureBlobFileSystem + * @param path Path of the file + * @param originalByteArray original byte array + * @return if content is validated true else, false + * @throws IOException + */ + protected boolean validateContent(AzureBlobFileSystem fs, Path path, + byte[] originalByteArray) + throws IOException { + int pos = 0; + int lenOfOriginalByteArray = originalByteArray.length; + + try (FSDataInputStream in = fs.open(path)) { + byte valueOfContentAtPos = (byte) in.read(); + + while (valueOfContentAtPos != -1 && pos < lenOfOriginalByteArray) { + if (originalByteArray[pos] != valueOfContentAtPos) { + assertEquals("Mismatch in content validation at position {}", pos, + originalByteArray[pos], valueOfContentAtPos); + return false; + } + valueOfContentAtPos = (byte) in.read(); + pos++; + } + if (valueOfContentAtPos != -1) { + assertEquals("Expected end of file", -1, valueOfContentAtPos); + return false; + } + return true; + } + + } + } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java index b44914e4f31dd..f0473789cf161 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.UUID; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer; import org.apache.hadoop.fs.permission.AclEntry; import org.junit.Test; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java new file mode 100644 index 0000000000000..a33a76ecefe77 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java @@ -0,0 +1,411 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
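The content-validation helper above walks the stream and the original byte array in lock step and also checks that the stream is exhausted. A stripped-down, Azure-free sketch of the same idea over a plain InputStream (illustrative only):

  import java.io.ByteArrayInputStream;
  import java.io.IOException;
  import java.io.InputStream;

  public class ContentValidationSketch {
    /** Return true iff the stream yields exactly the bytes of the original array. */
    static boolean matches(InputStream in, byte[] original) throws IOException {
      for (int pos = 0; pos < original.length; pos++) {
        int b = in.read();
        if (b == -1 || (byte) b != original[pos]) {
          return false;                 // short stream or mismatching byte
        }
      }
      return in.read() == -1;           // stream must also be exhausted
    }

    public static void main(String[] args) throws IOException {
      byte[] data = "hello".getBytes();
      System.out.println(matches(new ByteArrayInputStream(data), data));   // true
    }
  }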
+ */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamContext; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.io.IOUtils; + +public class ITestAbfsInputStreamStatistics + extends AbstractAbfsIntegrationTest { + private static final int OPERATIONS = 10; + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsInputStreamStatistics.class); + private static final int ONE_MB = 1024 * 1024; + private static final int ONE_KB = 1024; + private static final int CUSTOM_BLOCK_BUFFER_SIZE = 4 * 1024; + private byte[] defBuffer = new byte[ONE_MB]; + + public ITestAbfsInputStreamStatistics() throws Exception { + } + + /** + * Test to check the initial values of the AbfsInputStream statistics. + */ + @Test + public void testInitValues() throws IOException { + describe("Testing the initial values of AbfsInputStream Statistics"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path initValuesPath = path(getMethodName()); + AbfsOutputStream outputStream = null; + AbfsInputStream inputStream = null; + + try { + + outputStream = createAbfsOutputStreamWithFlushEnabled(fs, initValuesPath); + inputStream = abfss.openFileForRead(initValuesPath, fs.getFsStatistics()); + + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) inputStream.getStreamStatistics(); + + checkInitValue(stats.getSeekOperations(), "seekOps"); + checkInitValue(stats.getForwardSeekOperations(), "forwardSeekOps"); + checkInitValue(stats.getBackwardSeekOperations(), "backwardSeekOps"); + checkInitValue(stats.getBytesRead(), "bytesRead"); + checkInitValue(stats.getBytesSkippedOnSeek(), "bytesSkippedOnSeek"); + checkInitValue(stats.getBytesBackwardsOnSeek(), "bytesBackwardsOnSeek"); + checkInitValue(stats.getSeekInBuffer(), "seekInBuffer"); + checkInitValue(stats.getReadOperations(), "readOps"); + checkInitValue(stats.getBytesReadFromBuffer(), "bytesReadFromBuffer"); + checkInitValue(stats.getRemoteReadOperations(), "remoteReadOps"); + checkInitValue(stats.getReadAheadBytesRead(), "readAheadBytesRead"); + checkInitValue(stats.getRemoteBytesRead(), "readAheadRemoteBytesRead"); + + } finally { + IOUtils.cleanupWithLogger(LOG, outputStream, inputStream); + } + } + + /** + * Test to check statistics from seek operation in AbfsInputStream. + */ + @Test + public void testSeekStatistics() throws IOException { + describe("Testing the values of statistics from seek operations in " + + "AbfsInputStream"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path seekStatPath = path(getMethodName()); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + out = createAbfsOutputStreamWithFlushEnabled(fs, seekStatPath); + + //Writing a default buffer in a file. + out.write(defBuffer); + out.hflush(); + in = abfss.openFileForRead(seekStatPath, fs.getFsStatistics()); + + /* + * Writing 1MB buffer to the file, this would make the fCursor(Current + * position of cursor) to the end of file. 
+ */ + int result = in.read(defBuffer, 0, ONE_MB); + LOG.info("Result of read : {}", result); + + /* + * Seeking to start of file and then back to end would result in a + * backward and a forward seek respectively 10 times. + */ + for (int i = 0; i < OPERATIONS; i++) { + in.seek(0); + in.seek(ONE_MB); + } + + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) in.getStreamStatistics(); + + LOG.info("STATISTICS: {}", stats.toString()); + + /* + * seekOps - Since we are doing backward and forward seek OPERATIONS + * times, total seeks would be 2 * OPERATIONS. + * + * backwardSeekOps - Since we are doing a backward seek inside a loop + * for OPERATION times, total backward seeks would be OPERATIONS. + * + * forwardSeekOps - Since we are doing a forward seek inside a loop + * for OPERATION times, total forward seeks would be OPERATIONS. + * + * negativeBytesBackwardsOnSeek - Since we are doing backward seeks from + * end of file in a ONE_MB file each time, this would mean the bytes from + * backward seek would be OPERATIONS * ONE_MB. + * + * bytesSkippedOnSeek - Since, we move from start to end in seek, but + * our fCursor(position of cursor) always remain at end of file, this + * would mean no bytes were skipped on seek. Since, all forward seeks + * are in buffer. + * + * seekInBuffer - Since all seeks were in buffer, the seekInBuffer + * would be equal to 2 * OPERATIONS. + * + */ + assertEquals("Mismatch in seekOps value", 2 * OPERATIONS, + stats.getSeekOperations()); + assertEquals("Mismatch in backwardSeekOps value", OPERATIONS, + stats.getBackwardSeekOperations()); + assertEquals("Mismatch in forwardSeekOps value", OPERATIONS, + stats.getForwardSeekOperations()); + assertEquals("Mismatch in bytesBackwardsOnSeek value", + OPERATIONS * ONE_MB, stats.getBytesBackwardsOnSeek()); + assertEquals("Mismatch in bytesSkippedOnSeek value", + 0, stats.getBytesSkippedOnSeek()); + assertEquals("Mismatch in seekInBuffer value", 2 * OPERATIONS, + stats.getSeekInBuffer()); + + in.close(); + // Verifying whether stats are readable after stream is closed. + LOG.info("STATISTICS after closing: {}", stats.toString()); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Test to check statistics value from read operation in AbfsInputStream. + */ + @Test + public void testReadStatistics() throws IOException { + describe("Testing the values of statistics from read operation in " + + "AbfsInputStream"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path readStatPath = path(getMethodName()); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + out = createAbfsOutputStreamWithFlushEnabled(fs, readStatPath); + + /* + * Writing 1MB buffer to the file. + */ + out.write(defBuffer); + out.hflush(); + in = abfss.openFileForRead(readStatPath, fs.getFsStatistics()); + + /* + * Doing file read 10 times. + */ + for (int i = 0; i < OPERATIONS; i++) { + in.read(); + } + + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) in.getStreamStatistics(); + + LOG.info("STATISTICS: {}", stats.toString()); + + /* + * bytesRead - Since each time a single byte is read, total + * bytes read would be equal to OPERATIONS. + * + * readOps - Since each time read operation is performed OPERATIONS + * times, total number of read operations would be equal to OPERATIONS. + * + * remoteReadOps - Only a single remote read operation is done. Hence, + * total remote read ops is 1. 
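To make the seek-counter bookkeeping above concrete, here is a standalone arithmetic sketch (illustrative only; OPERATIONS and ONE_MB mirror the constants used in this test):

  public class SeekCounterArithmetic {
    public static void main(String[] args) {
      final int operations = 10;          // OPERATIONS in the test
      final int oneMb = 1024 * 1024;      // ONE_MB in the test

      long expectedSeekOps = 2L * operations;            // one backward + one forward seek per loop
      long expectedBackwardSeeks = operations;
      long expectedForwardSeeks = operations;
      long expectedBytesBackwards = (long) operations * oneMb; // each backward seek jumps a full 1 MB
      long expectedBytesSkipped = 0;                      // forward seeks stay inside the buffer
      long expectedSeeksInBuffer = 2L * operations;

      System.out.printf("seekOps=%d backward=%d forward=%d bytesBackwards=%d skipped=%d inBuffer=%d%n",
          expectedSeekOps, expectedBackwardSeeks, expectedForwardSeeks,
          expectedBytesBackwards, expectedBytesSkipped, expectedSeeksInBuffer);
    }
  }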
+ * + */ + assertEquals("Mismatch in bytesRead value", OPERATIONS, + stats.getBytesRead()); + assertEquals("Mismatch in readOps value", OPERATIONS, + stats.getReadOperations()); + assertEquals("Mismatch in remoteReadOps value", 1, + stats.getRemoteReadOperations()); + + in.close(); + // Verifying if stats are still readable after stream is closed. + LOG.info("STATISTICS after closing: {}", stats.toString()); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Testing AbfsInputStream works with null Statistics. + */ + @Test + public void testWithNullStreamStatistics() throws IOException { + describe("Testing AbfsInputStream operations with statistics as null"); + + AzureBlobFileSystem fs = getFileSystem(); + Path nullStatFilePath = path(getMethodName()); + byte[] oneKbBuff = new byte[ONE_KB]; + + // Creating an AbfsInputStreamContext instance with null StreamStatistics. + AbfsInputStreamContext abfsInputStreamContext = + new AbfsInputStreamContext( + getConfiguration().getSasTokenRenewPeriodForStreamsInSeconds()) + .withReadBufferSize(getConfiguration().getReadBufferSize()) + .withReadAheadQueueDepth(getConfiguration().getReadAheadQueueDepth()) + .withStreamStatistics(null) + .build(); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + out = createAbfsOutputStreamWithFlushEnabled(fs, nullStatFilePath); + + // Writing a 1KB buffer in the file. + out.write(oneKbBuff); + out.hflush(); + + // AbfsRestOperation Instance required for eTag. + AbfsRestOperation abfsRestOperation = + fs.getAbfsClient().getPathStatus(nullStatFilePath.toUri().getPath(), false); + + // AbfsInputStream with no StreamStatistics. + in = new AbfsInputStream(fs.getAbfsClient(), null, + nullStatFilePath.toUri().getPath(), ONE_KB, + abfsInputStreamContext, + abfsRestOperation.getResult().getResponseHeader("ETag")); + + // Verifying that AbfsInputStream Operations works with null statistics. + assertNotEquals("AbfsInputStream read() with null statistics should " + + "work", -1, in.read()); + in.seek(ONE_KB); + + // Verifying toString() with no StreamStatistics. + LOG.info("AbfsInputStream: {}", in.toString()); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Testing readAhead counters in AbfsInputStream with 30 seconds timeout. + */ + @Test + public void testReadAheadCounters() throws IOException { + describe("Test to check correct values for readAhead counters in " + + "AbfsInputStream"); + + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path readAheadCountersPath = path(getMethodName()); + + /* + * Setting the block size for readAhead as 4KB. + */ + abfss.getAbfsConfiguration().setReadBufferSize(CUSTOM_BLOCK_BUFFER_SIZE); + + AbfsOutputStream out = null; + AbfsInputStream in = null; + + try { + + /* + * Creating a file of 1MB size. + */ + out = createAbfsOutputStreamWithFlushEnabled(fs, readAheadCountersPath); + out.write(defBuffer); + out.close(); + + in = abfss.openFileForRead(readAheadCountersPath, fs.getFsStatistics()); + + /* + * Reading 1KB after each i * KB positions. Hence the reads are from 0 + * to 1KB, 1KB to 2KB, and so on.. for 5 operations. + */ + for (int i = 0; i < 5; i++) { + in.seek(ONE_KB * i); + in.read(defBuffer, ONE_KB * i, ONE_KB); + } + AbfsInputStreamStatisticsImpl stats = + (AbfsInputStreamStatisticsImpl) in.getStreamStatistics(); + + /* + * Verifying the counter values of readAheadBytesRead and remoteBytesRead. 
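The single-byte read accounting above can be illustrated without any Azure dependency: reading one byte at a time yields one read operation and one byte read per call, while the backing store is hit only once because the whole payload fits in one buffered fetch. A rough plain-java.io sketch of that behaviour (this is not the ABFS implementation, only an analogy):

  import java.io.BufferedInputStream;
  import java.io.ByteArrayInputStream;
  import java.io.IOException;

  public class SingleByteReadSketch {
    public static void main(String[] args) throws IOException {
      byte[] data = new byte[1024 * 1024];   // stands in for the 1 MB test file
      int operations = 10;
      long bytesRead = 0;
      long readOps = 0;
      // A buffer at least as large as the file means only one underlying fetch is
      // needed, mirroring the single remote read expected in the test above.
      try (BufferedInputStream in =
               new BufferedInputStream(new ByteArrayInputStream(data), data.length)) {
        for (int i = 0; i < operations; i++) {
          if (in.read() != -1) {
            bytesRead++;
            readOps++;
          }
        }
      }
      System.out.println("bytesRead=" + bytesRead + " readOps=" + readOps);
    }
  }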
+ * + * readAheadBytesRead : Since, we read 1KBs 5 times, that means we go + * from 0 to 5KB in the file. The bufferSize is set to 4KB, and since + * we have 8 blocks of readAhead buffer. We would have 8 blocks of 4KB + * buffer. Our read is till 5KB, hence readAhead would ideally read 2 + * blocks of 4KB which is equal to 8KB. But, sometimes to get blocks + * from readAhead buffer we might have to wait for background + * threads to fill the buffer and hence we might do remote read which + * would be faster. Therefore, readAheadBytesRead would be greater than + * or equal to the value of bytesFromReadAhead at the point we measure it. + * + * remoteBytesRead : Since, the bufferSize is set to 4KB and the number + * of blocks or readAheadQueueDepth is equal to 8. We would read 8 * 4 + * KB buffer on the first read, which is equal to 32KB. But, if we are not + * able to read some bytes that were in the buffer after doing + * readAhead, we might use remote read again. Thus, the bytes read + * remotely would be greater than or equal to the bytesFromRemoteRead + * value that we measure at some point of the operation. + * + */ + Assertions.assertThat(stats.getReadAheadBytesRead()).describedAs( + "Mismatch in readAheadBytesRead counter value") + .isGreaterThanOrEqualTo(in.getBytesFromReadAhead()); + + Assertions.assertThat(stats.getRemoteBytesRead()).describedAs( + "Mismatch in remoteBytesRead counter value") + .isGreaterThanOrEqualTo(in.getBytesFromRemoteRead()); + + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Testing time taken by AbfsInputStream to complete a GET request. + */ + @Test + public void testActionHttpGetRequest() throws IOException { + describe("Test to check the correct value of Time taken by http get " + + "request in AbfsInputStream"); + AzureBlobFileSystem fs = getFileSystem(); + AzureBlobFileSystemStore abfss = fs.getAbfsStore(); + Path actionHttpGetRequestPath = path(getMethodName()); + AbfsInputStream abfsInputStream = null; + AbfsOutputStream abfsOutputStream = null; + try { + abfsOutputStream = createAbfsOutputStreamWithFlushEnabled(fs, + actionHttpGetRequestPath); + abfsOutputStream.write('a'); + abfsOutputStream.hflush(); + + abfsInputStream = + abfss.openFileForRead(actionHttpGetRequestPath, fs.getFsStatistics()); + abfsInputStream.read(); + AbfsInputStreamStatisticsImpl abfsInputStreamStatistics = + (AbfsInputStreamStatisticsImpl) abfsInputStream.getStreamStatistics(); + + LOG.info("AbfsInputStreamStats info: {}", abfsInputStreamStatistics.toString()); + Assertions.assertThat( + abfsInputStreamStatistics.getActionHttpGetRequest()) + .describedAs("Mismatch in time taken by a GET request") + .isGreaterThan(0.0); + } finally { + IOUtils.cleanupWithLogger(LOG, abfsInputStream, abfsOutputStream); + } + } + + /** + * Method to assert the initial values of the statistics. + * + * @param actualValue the actual value of the statistics. + * @param statistic the name of operation or statistic being asserted. 
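The read-ahead bounds reasoned about above reduce to simple arithmetic. A hedged sketch of the numbers involved, assuming the 4 KB block size set in this test and the read-ahead queue depth of 8 that the comment describes:

  public class ReadAheadBoundsSketch {
    public static void main(String[] args) {
      final int blockSize = 4 * 1024;      // CUSTOM_BLOCK_BUFFER_SIZE in the test
      final int queueDepth = 8;            // read-ahead queue depth assumed in the comment
      final int bytesRequested = 5 * 1024; // five sequential 1 KB reads

      // Blocks the read-ahead path needs to cover the requested range.
      int blocksNeeded = (bytesRequested + blockSize - 1) / blockSize;   // = 2
      int minReadAheadBytes = blocksNeeded * blockSize;                  // = 8 KB lower bound
      // Upper bound on what the first read could prefetch remotely.
      int maxPrefetchBytes = queueDepth * blockSize;                     // = 32 KB

      System.out.println("minReadAheadBytes=" + minReadAheadBytes
          + " maxPrefetchBytes=" + maxPrefetchBytes);
    }
  }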
+ */ + private void checkInitValue(long actualValue, String statistic) { + assertEquals("Mismatch in " + statistic + " value", 0, actualValue); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java new file mode 100644 index 0000000000000..6d5e4cf3bce2d --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java @@ -0,0 +1,340 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.mockito.Mockito; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.azurebfs.services.AbfsListStatusRemoteIterator; +import org.apache.hadoop.fs.azurebfs.services.ListingSupport; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.verify; + +/** + * Test ListStatusRemoteIterator operation. + */ +public class ITestAbfsListStatusRemoteIterator extends AbstractAbfsIntegrationTest { + + private static final int TEST_FILES_NUMBER = 1000; + + public ITestAbfsListStatusRemoteIterator() throws Exception { + } + + @Test + public void testAbfsIteratorWithHasNext() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + final List fileNames = createFilesUnderDirectory(TEST_FILES_NUMBER, + testDir, "testListPath"); + + ListingSupport listngSupport = Mockito.spy(getFileSystem().getAbfsStore()); + RemoteIterator fsItr = new AbfsListStatusRemoteIterator( + getFileSystem().getFileStatus(testDir), listngSupport); + Assertions.assertThat(fsItr) + .describedAs("RemoteIterator should be instance of " + + "AbfsListStatusRemoteIterator by default") + .isInstanceOf(AbfsListStatusRemoteIterator.class); + int itrCount = 0; + while (fsItr.hasNext()) { + FileStatus fileStatus = fsItr.next(); + String pathStr = fileStatus.getPath().toString(); + fileNames.remove(pathStr); + itrCount++; + } + Assertions.assertThat(itrCount) + .describedAs("Number of iterations should be equal to the files " + + "created") + .isEqualTo(TEST_FILES_NUMBER); + Assertions.assertThat(fileNames.size()) + .describedAs("After removing every iterm found from the iterator, " + + "there should be no more elements in the fileNames") + .isEqualTo(0); + int minNumberOfInvokations = TEST_FILES_NUMBER / 10; + verify(listngSupport, Mockito.atLeast(minNumberOfInvokations)) + .listStatus(any(Path.class), nullable(String.class), + anyList(), anyBoolean(), + nullable(String.class)); + } + + @Test + public void testAbfsIteratorWithoutHasNext() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + final List fileNames = createFilesUnderDirectory(TEST_FILES_NUMBER, + testDir, "testListPath"); + + ListingSupport listngSupport = Mockito.spy(getFileSystem().getAbfsStore()); + RemoteIterator fsItr = new AbfsListStatusRemoteIterator( + getFileSystem().getFileStatus(testDir), listngSupport); + Assertions.assertThat(fsItr) + .describedAs("RemoteIterator should be instance of " + + 
"AbfsListStatusRemoteIterator by default") + .isInstanceOf(AbfsListStatusRemoteIterator.class); + int itrCount = 0; + for (int i = 0; i < TEST_FILES_NUMBER; i++) { + FileStatus fileStatus = fsItr.next(); + String pathStr = fileStatus.getPath().toString(); + fileNames.remove(pathStr); + itrCount++; + } + Assertions.assertThatThrownBy(() -> fsItr.next()) + .describedAs( + "next() should throw NoSuchElementException since next has been " + + "called " + TEST_FILES_NUMBER + " times") + .isInstanceOf(NoSuchElementException.class); + Assertions.assertThat(itrCount) + .describedAs("Number of iterations should be equal to the files " + + "created") + .isEqualTo(TEST_FILES_NUMBER); + Assertions.assertThat(fileNames.size()) + .describedAs("After removing every iterm found from the iterator, " + + "there should be no more elements in the fileNames") + .isEqualTo(0); + int minNumberOfInvokations = TEST_FILES_NUMBER / 10; + verify(listngSupport, Mockito.atLeast(minNumberOfInvokations)) + .listStatus(any(Path.class), nullable(String.class), + anyList(), anyBoolean(), + nullable(String.class)); + } + + @Test + public void testWithAbfsIteratorDisabled() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + setEnableAbfsIterator(false); + final List fileNames = createFilesUnderDirectory(TEST_FILES_NUMBER, + testDir, "testListPath"); + + RemoteIterator fsItr = + getFileSystem().listStatusIterator(testDir); + Assertions.assertThat(fsItr) + .describedAs("RemoteIterator should not be instance of " + + "AbfsListStatusRemoteIterator when it is disabled") + .isNotInstanceOf(AbfsListStatusRemoteIterator.class); + int itrCount = 0; + while (fsItr.hasNext()) { + FileStatus fileStatus = fsItr.next(); + String pathStr = fileStatus.getPath().toString(); + fileNames.remove(pathStr); + itrCount++; + } + Assertions.assertThat(itrCount) + .describedAs("Number of iterations should be equal to the files " + + "created") + .isEqualTo(TEST_FILES_NUMBER); + Assertions.assertThat(fileNames.size()) + .describedAs("After removing every iterm found from the iterator, " + + "there should be no more elements in the fileNames") + .isEqualTo(0); + } + + @Test + public void testWithAbfsIteratorDisabledWithoutHasNext() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + setEnableAbfsIterator(false); + final List fileNames = createFilesUnderDirectory(TEST_FILES_NUMBER, + testDir, "testListPath"); + + RemoteIterator fsItr = + getFileSystem().listStatusIterator(testDir); + Assertions.assertThat(fsItr) + .describedAs("RemoteIterator should not be instance of " + + "AbfsListStatusRemoteIterator when it is disabled") + .isNotInstanceOf(AbfsListStatusRemoteIterator.class); + int itrCount = 0; + for (int i = 0; i < TEST_FILES_NUMBER; i++) { + FileStatus fileStatus = fsItr.next(); + String pathStr = fileStatus.getPath().toString(); + fileNames.remove(pathStr); + itrCount++; + } + Assertions.assertThatThrownBy(() -> fsItr.next()) + .describedAs( + "next() should throw NoSuchElementException since next has been " + + "called " + TEST_FILES_NUMBER + " times") + .isInstanceOf(NoSuchElementException.class); + Assertions.assertThat(itrCount) + .describedAs("Number of iterations should be equal to the files " + + "created") + .isEqualTo(TEST_FILES_NUMBER); + Assertions.assertThat(fileNames.size()) + .describedAs("After removing every iterm found from the iterator, " + + "there should be no more elements in the fileNames") + .isEqualTo(0); + } + + @Test + public void 
testNextWhenNoMoreElementsPresent() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + RemoteIterator fsItr = + new AbfsListStatusRemoteIterator(getFileSystem().getFileStatus(testDir), + getFileSystem().getAbfsStore()); + fsItr = Mockito.spy(fsItr); + Mockito.doReturn(false).when(fsItr).hasNext(); + + RemoteIterator finalFsItr = fsItr; + Assertions.assertThatThrownBy(() -> finalFsItr.next()) + .describedAs( + "next() should throw NoSuchElementException if hasNext() return " + + "false") + .isInstanceOf(NoSuchElementException.class); + } + + @Test + public void testHasNextForEmptyDir() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + RemoteIterator fsItr = getFileSystem() + .listStatusIterator(testDir); + Assertions.assertThat(fsItr.hasNext()) + .describedAs("hasNext returns false for empty directory") + .isFalse(); + } + + @Test + public void testHasNextForFile() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + String testFileName = "testFile"; + Path testFile = new Path(testFileName); + getFileSystem().create(testFile); + setPageSize(10); + RemoteIterator fsItr = fs.listStatusIterator(testFile); + Assertions.assertThat(fsItr.hasNext()) + .describedAs("hasNext returns true for file").isTrue(); + Assertions.assertThat(fsItr.next().getPath().toString()) + .describedAs("next returns the file itself") + .endsWith(testFileName); + } + + @Test + public void testIOException() throws Exception { + Path testDir = createTestDirectory(); + setPageSize(10); + getFileSystem().mkdirs(testDir); + + String exceptionMessage = "test exception"; + ListingSupport lsSupport =getMockListingSupport(exceptionMessage); + RemoteIterator fsItr = + new AbfsListStatusRemoteIterator(getFileSystem().getFileStatus(testDir), + lsSupport); + + Assertions.assertThatThrownBy(() -> fsItr.next()) + .describedAs( + "When ioException is not null and queue is empty exception should be " + + "thrown") + .isInstanceOf(IOException.class) + .hasMessage(exceptionMessage); + } + + @Test + public void testNonExistingPath() throws Throwable { + Path nonExistingDir = new Path("nonExistingPath"); + Assertions.assertThatThrownBy( + () -> getFileSystem().listStatusIterator(nonExistingDir)).describedAs( + "test the listStatusIterator call on a path which is not " + + "present should result in FileNotFoundException") + .isInstanceOf(FileNotFoundException.class); + } + + private ListingSupport getMockListingSupport(String exceptionMessage) { + return new ListingSupport() { + @Override + public FileStatus[] listStatus(Path path) throws IOException { + return null; + } + + @Override + public FileStatus[] listStatus(Path path, String startFrom) + throws IOException { + return null; + } + + @Override + public String listStatus(Path path, String startFrom, + List fileStatuses, boolean fetchAll, String continuation) + throws IOException { + throw new IOException(exceptionMessage); + } + }; + } + + private Path createTestDirectory() throws IOException { + String testDirectoryName = "testDirectory" + System.currentTimeMillis(); + Path testDirectory = new Path(testDirectoryName); + getFileSystem().mkdirs(testDirectory); + return testDirectory; + } + + private void setEnableAbfsIterator(boolean shouldEnable) throws IOException { + AzureBlobFileSystemStore abfsStore = getAbfsStore(getFileSystem()); + abfsStore.getAbfsConfiguration().setEnableAbfsListIterator(shouldEnable); + } + + private void setPageSize(int pageSize) throws IOException { + AzureBlobFileSystemStore 
abfsStore = getAbfsStore(getFileSystem()); + abfsStore.getAbfsConfiguration().setListMaxResults(pageSize); + } + + private List createFilesUnderDirectory(int numFiles, Path rootPath, + String filenamePrefix) + throws ExecutionException, InterruptedException, IOException { + final List> tasks = new ArrayList<>(); + final List fileNames = new ArrayList<>(); + ExecutorService es = Executors.newFixedThreadPool(10); + try { + for (int i = 0; i < numFiles; i++) { + final Path filePath = new Path(rootPath, filenamePrefix + i); + Callable callable = () -> { + getFileSystem().create(filePath); + fileNames.add(makeQualified(filePath).toString()); + return null; + }; + tasks.add(es.submit(callable)); + } + for (Future task : tasks) { + task.get(); + } + } finally { + es.shutdownNow(); + } + return fileNames; + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java new file mode 100644 index 0000000000000..66b8da89572a1 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java @@ -0,0 +1,325 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.junit.Test; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.BYTES_RECEIVED; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.GET_RESPONSES; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.SEND_REQUESTS; + +public class ITestAbfsNetworkStatistics extends AbstractAbfsIntegrationTest { + + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsNetworkStatistics.class); + private static final int WRITE_OPERATION_LOOP_COUNT = 10; + + public ITestAbfsNetworkStatistics() throws Exception { + } + + /** + * Testing connections_made, send_request and bytes_send statistics in + * {@link AbfsRestOperation}. 
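The parallel file-creation helper just above follows a standard submit-then-join pattern on an ExecutorService. The sketch below shows that pattern with a hypothetical task body and no filesystem involved; a synchronized list is used for the collected names since the tasks append concurrently.

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;

  public class ParallelCreateSketch {
    public static void main(String[] args) throws Exception {
      List<String> created = Collections.synchronizedList(new ArrayList<>());
      List<Future<Void>> tasks = new ArrayList<>();
      ExecutorService es = Executors.newFixedThreadPool(10);
      try {
        for (int i = 0; i < 100; i++) {
          final String name = "testFile" + i;     // hypothetical file name
          Callable<Void> callable = () -> {
            created.add(name);                    // stand-in for fs.create(path)
            return null;
          };
          tasks.add(es.submit(callable));
        }
        for (Future<Void> task : tasks) {
          task.get();                             // propagate any task failure
        }
      } finally {
        es.shutdownNow();
      }
      System.out.println("created " + created.size() + " entries");
    }
  }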
+ */ + @Test + public void testAbfsHttpSendStatistics() throws IOException { + describe("Test to check correct values of statistics after Abfs http send " + + "request is done."); + + AzureBlobFileSystem fs = getFileSystem(); + Map metricMap; + Path sendRequestPath = path(getMethodName()); + String testNetworkStatsString = "http_send"; + + metricMap = fs.getInstrumentationMap(); + long expectedConnectionsMade = metricMap.get(CONNECTIONS_MADE.getStatName()); + long expectedRequestsSent = metricMap.get(SEND_REQUESTS.getStatName()); + long expectedBytesSent = 0; + + // -------------------------------------------------------------------- + // Operation: Creating AbfsOutputStream + try (AbfsOutputStream out = createAbfsOutputStreamWithFlushEnabled(fs, + sendRequestPath)) { + // Network stats calculation: For Creating AbfsOutputStream: + // 1 create request = 1 connection made and 1 send request + expectedConnectionsMade++; + expectedRequestsSent++; + // -------------------------------------------------------------------- + + // Operation: Write small data + // Network stats calculation: No additions. + // Data written is less than the buffer size and hence will not + // trigger any append request to store + out.write(testNetworkStatsString.getBytes()); + // -------------------------------------------------------------------- + + // Operation: HFlush + // Flushes all outstanding data (i.e. the current unfinished packet) + // from the client into the service on all DataNode replicas. + out.hflush(); + /* + * Network stats calculation: + * 3 possibilities here: + * A. As there is pending data to be written to store, this will result in: + * 1 append + 1 flush = 2 connections and 2 send requests + * + * B. If config "fs.azure.enable.small.write.optimization" is enabled, append + * and flush call will be merged for small data in buffer in this test. + * In which case it will be: + * 1 append+flush request = 1 connection and 1 send request + * + * C. If the path is configured for append Blob files to be used, hflush + * is a no-op. So in this case: + * 1 append = 1 connection and 1 send request + */ + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(sendRequestPath).toString()) + || (this.getConfiguration().isSmallWriteOptimizationEnabled())) { + expectedConnectionsMade++; + expectedRequestsSent++; + } else { + expectedConnectionsMade += 2; + expectedRequestsSent += 2; + } + expectedBytesSent += testNetworkStatsString.getBytes().length; + // -------------------------------------------------------------------- + + // Assertions + metricMap = fs.getInstrumentationMap(); + assertAbfsStatistics(CONNECTIONS_MADE, + expectedConnectionsMade, metricMap); + assertAbfsStatistics(SEND_REQUESTS, expectedRequestsSent, + metricMap); + assertAbfsStatistics(AbfsStatistic.BYTES_SENT, + expectedBytesSent, metricMap); + } + + // -------------------------------------------------------------------- + // Operation: AbfsOutputStream close. + // Network Stats calculation: 1 flush (with close) is send. + // 1 flush request = 1 connection and 1 send request + expectedConnectionsMade++; + expectedRequestsSent++; + // -------------------------------------------------------------------- + + // Operation: Re-create the file / create overwrite scenario + try (AbfsOutputStream out = createAbfsOutputStreamWithFlushEnabled(fs, + sendRequestPath)) { + /* + * Network Stats calculation: create overwrite + * There are 2 possibilities here. + * A. 
create overwrite results in 1 server call + * create with overwrite=true = 1 connection and 1 send request + * + * B. If config "fs.azure.enable.conditional.create.overwrite" is enabled, + * create overwrite=false (will fail in this case as file is indeed present) + * + getFileStatus to fetch the file ETag + * + create overwrite=true + * = 3 connections and 2 send requests + */ + if (this.getConfiguration().isConditionalCreateOverwriteEnabled()) { + expectedConnectionsMade += 3; + expectedRequestsSent += 2; + } else { + expectedConnectionsMade += 1; + expectedRequestsSent += 1; + } + // -------------------------------------------------------------------- + + // Operation: Multiple small appends + hflush + for (int i = 0; i < WRITE_OPERATION_LOOP_COUNT; i++) { + out.write(testNetworkStatsString.getBytes()); + // Network stats calculation: no-op. Small write + out.hflush(); + // Network stats calculation: Hflush + // refer to previous comments for hFlush network stats calcualtion + // possibilities + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(sendRequestPath).toString()) + || (this.getConfiguration().isSmallWriteOptimizationEnabled())) { + expectedConnectionsMade++; + expectedRequestsSent++; + } else { + expectedConnectionsMade += 2; + expectedRequestsSent += 2; + } + expectedBytesSent += testNetworkStatsString.getBytes().length; + } + // -------------------------------------------------------------------- + + // Assertions + metricMap = fs.getInstrumentationMap(); + assertAbfsStatistics(CONNECTIONS_MADE, expectedConnectionsMade, metricMap); + assertAbfsStatistics(SEND_REQUESTS, expectedRequestsSent, metricMap); + assertAbfsStatistics(AbfsStatistic.BYTES_SENT, expectedBytesSent, metricMap); + } + + } + + /** + * Testing get_response and bytes_received in {@link AbfsRestOperation}. + */ + @Test + public void testAbfsHttpResponseStatistics() throws IOException { + describe("Test to check correct values of statistics after Http " + + "Response is processed."); + + AzureBlobFileSystem fs = getFileSystem(); + Path getResponsePath = path(getMethodName()); + Map metricMap; + String testResponseString = "some response"; + + FSDataOutputStream out = null; + FSDataInputStream in = null; + long expectedConnectionsMade; + long expectedGetResponses; + long expectedBytesReceived; + + try { + // Creating a File and writing some bytes in it. 
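The branchy counter bookkeeping above (one request when append and flush are merged or the path is an append blob, otherwise two) can be captured in a tiny helper. The sketch is illustrative only; the two booleans stand in for the configuration probes used in the test.

  public class HflushCounterSketch {
    /** Requests added by one write()+hflush() round trip under the rules described above. */
    static int requestsPerHflush(boolean appendBlobPath, boolean smallWriteOptimization) {
      // Merged append+flush (small-write optimization) or append-blob no-op flush: 1 request.
      // Otherwise a separate append and a separate flush: 2 requests.
      return (appendBlobPath || smallWriteOptimization) ? 1 : 2;
    }

    public static void main(String[] args) {
      int loops = 10;                               // WRITE_OPERATION_LOOP_COUNT
      int expected = loops * requestsPerHflush(false, false);
      System.out.println("expected extra send requests: " + expected);   // 20
    }
  }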
+ out = fs.create(getResponsePath); + out.write(testResponseString.getBytes()); + out.hflush(); + + // Set metric baseline + metricMap = fs.getInstrumentationMap(); + long bytesWrittenToFile = testResponseString.getBytes().length; + expectedConnectionsMade = metricMap.get(CONNECTIONS_MADE.getStatName()); + expectedGetResponses = metricMap.get(CONNECTIONS_MADE.getStatName()); + expectedBytesReceived = metricMap.get(BYTES_RECEIVED.getStatName()); + + // -------------------------------------------------------------------- + // Operation: Create AbfsInputStream + in = fs.open(getResponsePath); + // Network stats calculation: For Creating AbfsInputStream: + // 1 GetFileStatus request to fetch file size = 1 connection and 1 get response + expectedConnectionsMade++; + expectedGetResponses++; + // -------------------------------------------------------------------- + + // Operation: Read + int result = in.read(); + // Network stats calculation: For read: + // 1 read request = 1 connection and 1 get response + expectedConnectionsMade++; + expectedGetResponses++; + expectedBytesReceived += bytesWrittenToFile; + // -------------------------------------------------------------------- + + // Assertions + metricMap = fs.getInstrumentationMap(); + assertAbfsStatistics(CONNECTIONS_MADE, expectedConnectionsMade, metricMap); + assertAbfsStatistics(GET_RESPONSES, expectedGetResponses, metricMap); + assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, expectedBytesReceived, metricMap); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + + // -------------------------------------------------------------------- + // Operation: AbfsOutputStream close. + // Network Stats calculation: no op. + // -------------------------------------------------------------------- + + try { + + // Recreate file with different file size + // [Create and append related network stats checks are done in + // test method testAbfsHttpSendStatistics] + StringBuilder largeBuffer = new StringBuilder(); + out = fs.create(getResponsePath); + + for (int i = 0; i < WRITE_OPERATION_LOOP_COUNT; i++) { + out.write(testResponseString.getBytes()); + out.hflush(); + largeBuffer.append(testResponseString); + } + + // sync back to metric baseline + metricMap = fs.getInstrumentationMap(); + expectedConnectionsMade = metricMap.get(CONNECTIONS_MADE.getStatName()); + expectedGetResponses = metricMap.get(GET_RESPONSES.getStatName()); + // -------------------------------------------------------------------- + // Operation: Create AbfsInputStream + in = fs.open(getResponsePath); + // Network stats calculation: For Creating AbfsInputStream: + // 1 GetFileStatus for file size = 1 connection and 1 get response + expectedConnectionsMade++; + expectedGetResponses++; + // -------------------------------------------------------------------- + + // Operation: Read + in.read(0, largeBuffer.toString().getBytes(), 0, largeBuffer.toString().getBytes().length); + // Network stats calculation: Total data written is still lesser than + // a buffer size. Hence will trigger only one read to store. 
So result is: + // 1 read request = 1 connection and 1 get response + expectedConnectionsMade++; + expectedGetResponses++; + expectedBytesReceived += (WRITE_OPERATION_LOOP_COUNT * testResponseString.getBytes().length); + // -------------------------------------------------------------------- + + // Assertions + metricMap = fs.getInstrumentationMap(); + assertAbfsStatistics(CONNECTIONS_MADE, expectedConnectionsMade, metricMap); + assertAbfsStatistics(GET_RESPONSES, expectedGetResponses, metricMap); + assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, expectedBytesReceived, metricMap); + } finally { + IOUtils.cleanupWithLogger(LOG, out, in); + } + } + + /** + * Testing bytes_received counter value when a response failure occurs. + */ + @Test + public void testAbfsHttpResponseFailure() throws IOException { + describe("Test to check the values of bytes received counter when a " + + "response is failed"); + + AzureBlobFileSystem fs = getFileSystem(); + Path responseFailurePath = path(getMethodName()); + Map metricMap; + FSDataOutputStream out = null; + + try { + //create an empty file + out = fs.create(responseFailurePath); + //Re-creating the file again on same path with false overwrite, this + // would cause a response failure with status code 409. + out = fs.create(responseFailurePath, false); + } catch (FileAlreadyExistsException faee) { + metricMap = fs.getInstrumentationMap(); + // Assert after catching the 409 error to check the counter values. + assertAbfsStatistics(AbfsStatistic.BYTES_RECEIVED, 0, metricMap); + } finally { + IOUtils.cleanupWithLogger(LOG, out); + } + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java new file mode 100644 index 0000000000000..392e80a0a782c --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java @@ -0,0 +1,264 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl; + +/** + * Test AbfsOutputStream statistics. 
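The overwrite-failure case exercised just above relies on the generic FileSystem contract: creating an existing path with overwrite=false fails. A hedged sketch of that pattern against the local filesystem (the path name is arbitrary; ABFS surfaces the underlying HTTP 409 as FileAlreadyExistsException, which is what the test catches):

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class CreateOverwriteSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.getLocal(new Configuration());
      Path p = new Path(System.getProperty("java.io.tmpdir"), "overwrite-sketch");
      fs.create(p, true).close();          // first create succeeds
      try {
        fs.create(p, false).close();       // create with overwrite=false must fail
      } catch (IOException expected) {
        // On ABFS this is a FileAlreadyExistsException backed by a 409 response.
        System.out.println("got expected failure: " + expected);
      } finally {
        fs.delete(p, false);
      }
    }
  }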
+ */ +public class ITestAbfsOutputStreamStatistics + extends AbstractAbfsIntegrationTest { + + private static final int OPERATIONS = 10; + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsOutputStreamStatistics.class); + + public ITestAbfsOutputStreamStatistics() throws Exception { + } + + /** + * Tests to check bytes uploaded successfully in {@link AbfsOutputStream}. + */ + @Test + public void testAbfsOutputStreamUploadingBytes() throws IOException { + describe("Testing bytes uploaded successfully by AbfsOutputSteam"); + final AzureBlobFileSystem fs = getFileSystem(); + Path uploadBytesFilePath = path(getMethodName()); + String testBytesToUpload = "bytes"; + + try ( + AbfsOutputStream outForSomeBytes = createAbfsOutputStreamWithFlushEnabled( + fs, uploadBytesFilePath) + ) { + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatisticsForUploadBytes = + getAbfsOutputStreamStatistics(outForSomeBytes); + + //Test for zero bytes To upload. + assertEquals("Mismatch in bytes to upload", 0, + abfsOutputStreamStatisticsForUploadBytes.getBytesToUpload()); + + outForSomeBytes.write(testBytesToUpload.getBytes()); + outForSomeBytes.flush(); + abfsOutputStreamStatisticsForUploadBytes = + getAbfsOutputStreamStatistics(outForSomeBytes); + + //Test for bytes to upload. + assertEquals("Mismatch in bytes to upload", + testBytesToUpload.getBytes().length, + abfsOutputStreamStatisticsForUploadBytes.getBytesToUpload()); + + //Test for successful bytes uploaded. + assertEquals("Mismatch in successful bytes uploaded", + testBytesToUpload.getBytes().length, + abfsOutputStreamStatisticsForUploadBytes.getBytesUploadSuccessful()); + + } + + try ( + AbfsOutputStream outForLargeBytes = createAbfsOutputStreamWithFlushEnabled( + fs, uploadBytesFilePath)) { + + for (int i = 0; i < OPERATIONS; i++) { + outForLargeBytes.write(testBytesToUpload.getBytes()); + } + outForLargeBytes.flush(); + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + getAbfsOutputStreamStatistics(outForLargeBytes); + + //Test for bytes to upload. + assertEquals("Mismatch in bytes to upload", + OPERATIONS * (testBytesToUpload.getBytes().length), + abfsOutputStreamStatistics.getBytesToUpload()); + + //Test for successful bytes uploaded. + assertEquals("Mismatch in successful bytes uploaded", + OPERATIONS * (testBytesToUpload.getBytes().length), + abfsOutputStreamStatistics.getBytesUploadSuccessful()); + + } + } + + /** + * Tests to check correct values of queue shrunk operations in + * AbfsOutputStream. + * + * After writing data, AbfsOutputStream doesn't upload the data until + * flushed. Hence, flush() method is called after write() to test queue + * shrink operations. + */ + @Test + public void testAbfsOutputStreamQueueShrink() throws IOException { + describe("Testing queue shrink operations by AbfsOutputStream"); + final AzureBlobFileSystem fs = getFileSystem(); + Path queueShrinkFilePath = path(getMethodName()); + String testQueueShrink = "testQueue"; + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(queueShrinkFilePath).toString())) { + // writeOperationsQueue is not used for appendBlob, hence queueShrink is 0 + return; + } + + try (AbfsOutputStream outForOneOp = createAbfsOutputStreamWithFlushEnabled( + fs, queueShrinkFilePath)) { + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + getAbfsOutputStreamStatistics(outForOneOp); + + //Test for shrinking queue zero time. 
+ assertEquals("Mismatch in queue shrunk operations", 0, + abfsOutputStreamStatistics.getQueueShrunkOps()); + + } + + /* + * After writing in the loop we flush inside the loop to ensure the write + * operation done in that loop is considered to be done which would help + * us triggering the shrinkWriteOperationQueue() method each time after + * the write operation. + * If we call flush outside the loop, then it will take all the write + * operations inside the loop as one write operation. + * + */ + try ( + AbfsOutputStream outForLargeOps = createAbfsOutputStreamWithFlushEnabled( + fs, queueShrinkFilePath)) { + for (int i = 0; i < OPERATIONS; i++) { + outForLargeOps.write(testQueueShrink.getBytes()); + outForLargeOps.flush(); + } + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + getAbfsOutputStreamStatistics(outForLargeOps); + /* + * After a write operation is done, it is in a task queue where it is + * removed. Hence, to get the correct expected value we get the size of + * the task queue from AbfsOutputStream and subtract it with total + * write operations done to get the number of queue shrinks done. + * + */ + assertEquals("Mismatch in queue shrunk operations", + OPERATIONS - outForLargeOps.getWriteOperationsSize(), + abfsOutputStreamStatistics.getQueueShrunkOps()); + } + + } + + /** + * Tests to check correct values of write current buffer operations done by + * AbfsOutputStream. + * + * After writing data, AbfsOutputStream doesn't upload data till flush() is + * called. Hence, flush() calls were made after write(). + */ + @Test + public void testAbfsOutputStreamWriteBuffer() throws IOException { + describe("Testing write current buffer operations by AbfsOutputStream"); + final AzureBlobFileSystem fs = getFileSystem(); + Path writeBufferFilePath = path(getMethodName()); + String testWriteBuffer = "Buffer"; + + try (AbfsOutputStream outForOneOp = createAbfsOutputStreamWithFlushEnabled( + fs, writeBufferFilePath)) { + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + getAbfsOutputStreamStatistics(outForOneOp); + + //Test for zero time writing buffer to service. + assertEquals("Mismatch in write current buffer operations", 0, + abfsOutputStreamStatistics.getWriteCurrentBufferOperations()); + + outForOneOp.write(testWriteBuffer.getBytes()); + outForOneOp.flush(); + + abfsOutputStreamStatistics = getAbfsOutputStreamStatistics(outForOneOp); + + //Test for one time writing buffer to service. + assertEquals("Mismatch in write current buffer operations", 1, + abfsOutputStreamStatistics.getWriteCurrentBufferOperations()); + } + + try ( + AbfsOutputStream outForLargeOps = createAbfsOutputStreamWithFlushEnabled( + fs, writeBufferFilePath)) { + + /* + * Need to flush each time after we write to actually write the data + * into the data store and thus, get the writeCurrentBufferToService() + * method triggered and increment the statistic. + */ + for (int i = 0; i < OPERATIONS; i++) { + outForLargeOps.write(testWriteBuffer.getBytes()); + outForLargeOps.flush(); + } + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + getAbfsOutputStreamStatistics(outForLargeOps); + //Test for 10 times writing buffer to service. + assertEquals("Mismatch in write current buffer operations", + OPERATIONS, + abfsOutputStreamStatistics.getWriteCurrentBufferOperations()); + } + } + + /** + * Test to check correct value of time spent on a PUT request in + * AbfsOutputStream. 
+ */ + @Test + public void testAbfsOutputStreamDurationTrackerPutRequest() throws IOException { + describe("Testing to check if DurationTracker for PUT request is working " + + "correctly."); + AzureBlobFileSystem fs = getFileSystem(); + Path pathForPutRequest = path(getMethodName()); + + try(AbfsOutputStream outputStream = + createAbfsOutputStreamWithFlushEnabled(fs, pathForPutRequest)) { + outputStream.write('a'); + outputStream.hflush(); + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + getAbfsOutputStreamStatistics(outputStream); + LOG.info("AbfsOutputStreamStats info: {}", abfsOutputStreamStatistics.toString()); + Assertions.assertThat(abfsOutputStreamStatistics.getTimeSpentOnPutRequest()) + .describedAs("Mismatch in timeSpentOnPutRequest DurationTracker") + .isGreaterThan(0.0); + } + } + + /** + * Method to get the AbfsOutputStream statistics. + * + * @param out AbfsOutputStream whose statistics is needed. + * @return AbfsOutputStream statistics implementation class to get the + * values of the counters. + */ + private static AbfsOutputStreamStatisticsImpl getAbfsOutputStreamStatistics( + AbfsOutputStream out) { + return (AbfsOutputStreamStatisticsImpl) out.getOutputStreamStatistics(); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index a270a00e9132e..52abb097ef311 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_READ_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MAX_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE; @@ -46,6 +47,7 @@ public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { public static Iterable sizes() { return Arrays.asList(new Object[][]{{MIN_BUFFER_SIZE}, {DEFAULT_READ_BUFFER_SIZE}, + {APPENDBLOB_MAX_WRITE_BUFFER_SIZE}, {MAX_BUFFER_SIZE}}); } @@ -70,6 +72,7 @@ private void testReadWriteAndSeek(int bufferSize) throws Exception { final byte[] b = new byte[2 * bufferSize]; new Random().nextBytes(b); + try (FSDataOutputStream stream = fs.create(TEST_PATH)) { stream.write(b); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java index 6f6982652e49f..1d86de7ebeb5d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java @@ -79,12 +79,13 @@ public void testAbfsRestOperationExceptionFormat() throws IOException { } @Test - public void testRequestRetryConfig() throws Exception { - testRetryLogic(0); - testRetryLogic(3); + public void testCustomTokenFetchRetryCount() throws Exception { + testWithDifferentCustomTokenFetchRetry(0); + 
testWithDifferentCustomTokenFetchRetry(3); + testWithDifferentCustomTokenFetchRetry(5); } - public void testRetryLogic(int numOfRetries) throws Exception { + public void testWithDifferentCustomTokenFetchRetry(int numOfRetries) throws Exception { AzureBlobFileSystem fs = this.getFileSystem(); Configuration config = new Configuration(this.getRawConfiguration()); @@ -93,7 +94,7 @@ public void testRetryLogic(int numOfRetries) throws Exception { config.set("fs.azure.account.auth.type." + accountName, "Custom"); config.set("fs.azure.account.oauth.provider.type." + accountName, "org.apache.hadoop.fs" + ".azurebfs.oauth2.RetryTestTokenProvider"); - config.set("fs.azure.io.retry.max.retries", Integer.toString(numOfRetries)); + config.set("fs.azure.custom.token.fetch.retry.count", Integer.toString(numOfRetries)); // Stop filesystem creation as it will lead to calls to store. config.set("fs.azure.createRemoteFileSystemDuringInitialization", "false"); @@ -110,7 +111,7 @@ public void testRetryLogic(int numOfRetries) throws Exception { // Number of retries done should be as configured Assert.assertTrue( "Number of token fetch retries (" + RetryTestTokenProvider.reTryCount - + ") done, does not match with max " + "retry count configured (" + numOfRetries + + ") done, does not match with fs.azure.custom.token.fetch.retry.count configured (" + numOfRetries + ")", RetryTestTokenProvider.reTryCount == numOfRetries); } } \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java new file mode 100644 index 0000000000000..42205807c1b3e --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -0,0 +1,258 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; +import org.apache.hadoop.fs.permission.FsPermission; + +/** + * Tests AzureBlobFileSystem Statistics. + */ +public class ITestAbfsStatistics extends AbstractAbfsIntegrationTest { + + private static final int NUMBER_OF_OPS = 10; + + public ITestAbfsStatistics() throws Exception { + } + + /** + * Testing the initial value of statistics. 
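The configuration switch exercised above, fs.azure.custom.token.fetch.retry.count, only governs token-fetch retries for a Custom auth provider. A minimal sketch of wiring it up; the account name is a placeholder, and the provider class is the test-only RetryTestTokenProvider referenced in this patch:

  import org.apache.hadoop.conf.Configuration;

  public class TokenFetchRetryConfigSketch {
    public static void main(String[] args) {
      String accountName = "myaccount.dfs.core.windows.net";   // placeholder account
      Configuration conf = new Configuration();
      conf.set("fs.azure.account.auth.type." + accountName, "Custom");
      conf.set("fs.azure.account.oauth.provider.type." + accountName,
          "org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider");
      conf.set("fs.azure.custom.token.fetch.retry.count", "3");
      // Skip remote container creation while experimenting with auth settings.
      conf.set("fs.azure.createRemoteFileSystemDuringInitialization", "false");
      System.out.println("retries=" + conf.get("fs.azure.custom.token.fetch.retry.count"));
    }
  }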
+ */ + @Test + public void testInitialStatsValues() throws IOException { + describe("Testing the initial values of Abfs counters"); + + AbfsCounters abfsCounters = + new AbfsCountersImpl(getFileSystem().getUri()); + Map metricMap = abfsCounters.toMap(); + + for (Map.Entry entry : metricMap.entrySet()) { + String key = entry.getKey(); + Long value = entry.getValue(); + + //Verify if initial value of statistic is 0. + checkInitialValue(key, value); + } + } + + /** + * Testing statistics by creating files and directories. + */ + @Test + public void testCreateStatistics() throws IOException { + describe("Testing counter values got by creating directories and files in" + + " Abfs"); + + AzureBlobFileSystem fs = getFileSystem(); + Path createFilePath = path(getMethodName()); + Path createDirectoryPath = path(getMethodName() + "Dir"); + + fs.mkdirs(createDirectoryPath); + fs.createNonRecursive(createFilePath, FsPermission + .getDefault(), false, 1024, (short) 1, 1024, null); + + Map metricMap = fs.getInstrumentationMap(); + /* + Test of statistic values after creating a directory and a file ; + getFileStatus is called 1 time after creating file and 1 time at time of + initialising. + */ + assertAbfsStatistics(AbfsStatistic.CALL_CREATE, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_CREATE_NON_RECURSIVE, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.FILES_CREATED, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.DIRECTORIES_CREATED, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_MKDIRS, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_GET_FILE_STATUS, 2, metricMap); + + //re-initialising Abfs to reset statistic values. + fs.initialize(fs.getUri(), fs.getConf()); + + /* + Creating 10 directories and files; Directories and files can't be created + with same name, hence + i to give unique names. + */ + for (int i = 0; i < NUMBER_OF_OPS; i++) { + fs.mkdirs(path(getMethodName() + "Dir" + i)); + fs.createNonRecursive(path(getMethodName() + i), + FsPermission.getDefault(), false, 1024, (short) 1, + 1024, null); + } + + metricMap = fs.getInstrumentationMap(); + /* + Test of statistics values after creating 10 directories and files; + getFileStatus is called 1 time at initialise() plus number of times file + is created. + */ + assertAbfsStatistics(AbfsStatistic.CALL_CREATE, NUMBER_OF_OPS, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_CREATE_NON_RECURSIVE, NUMBER_OF_OPS, + metricMap); + assertAbfsStatistics(AbfsStatistic.FILES_CREATED, NUMBER_OF_OPS, metricMap); + assertAbfsStatistics(AbfsStatistic.DIRECTORIES_CREATED, NUMBER_OF_OPS, + metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_MKDIRS, NUMBER_OF_OPS, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_GET_FILE_STATUS, + 1 + NUMBER_OF_OPS, metricMap); + } + + /** + * Testing statistics by deleting files and directories. + */ + @Test + public void testDeleteStatistics() throws IOException { + describe("Testing counter values got by deleting directory and files " + + "in Abfs"); + + AzureBlobFileSystem fs = getFileSystem(); + /* + This directory path needs to be root for triggering the + directories_deleted counter. + */ + Path createDirectoryPath = path("/"); + Path createFilePath = path(getMethodName()); + + /* + creating a directory and a file inside that directory. + The directory is root. Hence, no parent. This allows us to invoke + deleteRoot() method to see the population of directories_deleted and + files_deleted counters. 
+ */ + fs.mkdirs(createDirectoryPath); + fs.create(path(createDirectoryPath + getMethodName())); + fs.delete(createDirectoryPath, true); + + Map metricMap = fs.getInstrumentationMap(); + + /* + Test for op_delete, files_deleted, op_list_status. + since directory is delete recursively op_delete is called 2 times. + 1 file is deleted, 1 listStatus() call is made. + */ + assertAbfsStatistics(AbfsStatistic.CALL_DELETE, 2, metricMap); + assertAbfsStatistics(AbfsStatistic.FILES_DELETED, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_LIST_STATUS, 1, metricMap); + + /* + creating a root directory and deleting it recursively to see if + directories_deleted is called or not. + */ + fs.mkdirs(createDirectoryPath); + fs.create(createFilePath); + fs.delete(createDirectoryPath, true); + metricMap = fs.getInstrumentationMap(); + + //Test for directories_deleted. + assertAbfsStatistics(AbfsStatistic.DIRECTORIES_DELETED, 1, metricMap); + } + + /** + * Testing statistics of open, append, rename and exists method calls. + */ + @Test + public void testOpenAppendRenameExists() throws IOException { + describe("Testing counter values on calling open, append and rename and " + + "exists methods on Abfs"); + + AzureBlobFileSystem fs = getFileSystem(); + Path createFilePath = path(getMethodName()); + Path destCreateFilePath = path(getMethodName() + "New"); + + fs.create(createFilePath); + fs.open(createFilePath); + fs.append(createFilePath); + assertTrue(fs.rename(createFilePath, destCreateFilePath)); + + Map metricMap = fs.getInstrumentationMap(); + //Testing single method calls to open, append and rename. + assertAbfsStatistics(AbfsStatistic.CALL_OPEN, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_APPEND, 1, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_RENAME, 1, metricMap); + + //Testing if file exists at path. + assertTrue(String.format("File with name %s should exist", + destCreateFilePath), + fs.exists(destCreateFilePath)); + assertFalse(String.format("File with name %s should not exist", + createFilePath), + fs.exists(createFilePath)); + + metricMap = fs.getInstrumentationMap(); + //Testing exists() calls. + assertAbfsStatistics(AbfsStatistic.CALL_EXIST, 2, metricMap); + + //re-initialising Abfs to reset statistic values. + fs.initialize(fs.getUri(), fs.getConf()); + + fs.create(destCreateFilePath); + + for (int i = 0; i < NUMBER_OF_OPS; i++) { + fs.open(destCreateFilePath); + fs.append(destCreateFilePath); + } + + metricMap = fs.getInstrumentationMap(); + + //Testing large number of method calls to open, append. + assertAbfsStatistics(AbfsStatistic.CALL_OPEN, NUMBER_OF_OPS, metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_APPEND, NUMBER_OF_OPS, metricMap); + + for (int i = 0; i < NUMBER_OF_OPS; i++) { + // rename and then back to earlier name for no error while looping. + assertTrue(fs.rename(destCreateFilePath, createFilePath)); + assertTrue(fs.rename(createFilePath, destCreateFilePath)); + + //check if first name is existing and 2nd is not existing. + assertTrue(String.format("File with name %s should exist", + destCreateFilePath), + fs.exists(destCreateFilePath)); + assertFalse(String.format("File with name %s should not exist", + createFilePath), + fs.exists(createFilePath)); + + } + + metricMap = fs.getInstrumentationMap(); + + /* + Testing exists() calls and rename calls. Since both were called 2 + times in 1 loop. 2*numberOfOps is expectedValue. 
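      (assertAbfsStatistics, presumably supplied by the test base class, is
      assumed to look the counter up by AbfsStatistic#getStatName() in the
      given instrumentation map and assert equality with the expected value.)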
+ */ + assertAbfsStatistics(AbfsStatistic.CALL_RENAME, 2 * NUMBER_OF_OPS, + metricMap); + assertAbfsStatistics(AbfsStatistic.CALL_EXIST, 2 * NUMBER_OF_OPS, + metricMap); + + } + + /** + * Method to check initial value of the statistics which should be 0. + * + * @param statName name of the statistic to be checked. + * @param statValue value of the statistic. + */ + private void checkInitialValue(String statName, long statValue) { + assertEquals("Mismatch in " + statName, 0, statValue); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java new file mode 100644 index 0000000000000..395a456124bdf --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; + +/** + * Test Abfs Stream. + */ + +public class ITestAbfsStreamStatistics extends AbstractAbfsIntegrationTest { + public ITestAbfsStreamStatistics() throws Exception { + } + + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsStreamStatistics.class); + + private static final int LARGE_NUMBER_OF_OPS = 999999; + + /*** + * Testing {@code incrementReadOps()} in class {@code AbfsInputStream} and + * {@code incrementWriteOps()} in class {@code AbfsOutputStream}. 
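 * The counts verified here are the generic readOps/writeOps of the
 * {@code FileSystem.Statistics} instance returned by {@code getFsStatistics()},
 * not the ABFS-specific counters exposed through the instrumentation map.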
+ * + * @throws Exception + */ + @Test + public void testAbfsStreamOps() throws Exception { + describe("Test to see correct population of read and write operations in " + + "Abfs"); + + final AzureBlobFileSystem fs = getFileSystem(); + Path smallOperationsFile = new Path("testOneReadWriteOps"); + Path largeOperationsFile = new Path("testLargeReadWriteOps"); + FileSystem.Statistics statistics = fs.getFsStatistics(); + String testReadWriteOps = "test this"; + statistics.reset(); + + //Test for zero write operation + assertReadWriteOps("write", 0, statistics.getWriteOps()); + + //Test for zero read operation + assertReadWriteOps("read", 0, statistics.getReadOps()); + + FSDataOutputStream outForOneOperation = null; + FSDataInputStream inForOneOperation = null; + try { + outForOneOperation = fs.create(smallOperationsFile); + statistics.reset(); + outForOneOperation.write(testReadWriteOps.getBytes()); + + //Test for a single write operation + assertReadWriteOps("write", 1, statistics.getWriteOps()); + + //Flushing output stream to see content to read + outForOneOperation.hflush(); + inForOneOperation = fs.open(smallOperationsFile); + statistics.reset(); + int result = inForOneOperation.read(testReadWriteOps.getBytes(), 0, + testReadWriteOps.getBytes().length); + + LOG.info("Result of Read operation : {}", result); + /* + * Testing if 2 read_ops value is coming after reading full content + * from a file (3 if anything to read from Buffer too). Reason: read() + * call gives read_ops=1, reading from AbfsClient(http GET) gives + * read_ops=2. + * + * In some cases ABFS-prefetch thread runs in the background which + * returns some bytes from buffer and gives an extra readOp. + * Thus, making readOps values arbitrary and giving intermittent + * failures in some cases. Hence, readOps values of 2 or 3 is seen in + * different setups. + * + */ + assertTrue(String.format("The actual value of %d was not equal to the " + + "expected value of 2 or 3", statistics.getReadOps()), + statistics.getReadOps() == 2 || statistics.getReadOps() == 3); + + } finally { + IOUtils.cleanupWithLogger(LOG, inForOneOperation, + outForOneOperation); + } + + //Validating if content is being written in the smallOperationsFile + assertTrue("Mismatch in content validation", + validateContent(fs, smallOperationsFile, + testReadWriteOps.getBytes())); + + FSDataOutputStream outForLargeOperations = null; + FSDataInputStream inForLargeOperations = null; + StringBuilder largeOperationsValidationString = new StringBuilder(); + try { + outForLargeOperations = fs.create(largeOperationsFile); + statistics.reset(); + int largeValue = LARGE_NUMBER_OF_OPS; + for (int i = 0; i < largeValue; i++) { + outForLargeOperations.write(testReadWriteOps.getBytes()); + + //Creating the String for content Validation + largeOperationsValidationString.append(testReadWriteOps); + } + LOG.info("Number of bytes of Large data written: {}", + largeOperationsValidationString.toString().getBytes().length); + + //Test for 1000000 write operations + assertReadWriteOps("write", largeValue, statistics.getWriteOps()); + + inForLargeOperations = fs.open(largeOperationsFile); + for (int i = 0; i < largeValue; i++) { + inForLargeOperations + .read(testReadWriteOps.getBytes(), 0, + testReadWriteOps.getBytes().length); + } + + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(largeOperationsFile).toString())) { + // for appendblob data is already flushed, so there is more data to read. 
+ assertTrue(String.format("The actual value of %d was not equal to the " + + "expected value", statistics.getReadOps()), + statistics.getReadOps() == (largeValue + 3) || statistics.getReadOps() == (largeValue + 4)); + } else { + //Test for 1000000 read operations + assertReadWriteOps("read", largeValue, statistics.getReadOps()); + } + + } finally { + IOUtils.cleanupWithLogger(LOG, inForLargeOperations, + outForLargeOperations); + } + //Validating if content is being written in largeOperationsFile + assertTrue("Mismatch in content validation", + validateContent(fs, largeOperationsFile, + largeOperationsValidationString.toString().getBytes())); + + } + + /** + * Generic method to assert both Read an write operations. + * + * @param operation what operation is being asserted + * @param expectedValue value which is expected + * @param actualValue value which is actual + */ + + private void assertReadWriteOps(String operation, long expectedValue, + long actualValue) { + assertEquals("Mismatch in " + operation + " operations", expectedValue, + actualValue); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java index 94e0ce3f4826e..1278e652b3bbb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java @@ -209,46 +209,55 @@ public void testGetFileStatusUnauthorized() throws Exception { @Test public void testSetOwnerUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.SetOwner, true); } @Test public void testSetPermissionUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.SetPermissions, true); } @Test public void testModifyAclEntriesUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.ModifyAclEntries, true); } @Test public void testRemoveAclEntriesUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.RemoveAclEntries, true); } @Test public void testRemoveDefaultAclUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.RemoveDefaultAcl, true); } @Test public void testRemoveAclUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.RemoveAcl, true); } @Test public void testSetAclUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.SetAcl, true); } @Test public void testGetAclStatusAuthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.GetAcl, false); } @Test public void testGetAclStatusUnauthorized() throws Exception { + Assume.assumeTrue(this.getFileSystem().getIsNamespaceEnabled()); runTest(FileSystemOperations.GetAcl, true); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index bc5fc59d9d97e..2c7b776ebe411 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -17,16 +17,20 @@ */ package org.apache.hadoop.fs.azurebfs; -import com.google.common.collect.Lists; - import java.io.FileNotFoundException; import java.io.IOException; +import java.lang.reflect.Field; import java.util.List; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.junit.Assume; import org.junit.Test; +import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider; +import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers; @@ -37,6 +41,9 @@ import org.apache.hadoop.security.AccessControlException; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENABLE_CHECK_ACCESS; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET; @@ -44,9 +51,15 @@ import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CLIENT_ID; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CLIENT_SECRET; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; /** * Test cases for AzureBlobFileSystem.access() + * + * Some of the tests in this class require additional configs set in the test + * config file. + * Refer testing_azure.md for how to set the configs. 
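 * In particular, the check-access test client id, client secret and user GUID
 * keys, plus the OAuth client endpoint for the account, must be configured;
 * the prerequisite check below fails fast (rather than skipping) when any of
 * them is missing.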
+ * */ public class ITestAzureBlobFileSystemCheckAccess extends AbstractAbfsIntegrationTest { @@ -72,25 +85,27 @@ private void setTestUserFs() throws Exception { if (this.testUserFs != null) { return; } - String orgClientId = getConfiguration().get(FS_AZURE_BLOB_FS_CLIENT_ID); - String orgClientSecret = getConfiguration() - .get(FS_AZURE_BLOB_FS_CLIENT_SECRET); - Boolean orgCreateFileSystemDurungInit = getConfiguration() - .getBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true); - getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_ID, - getConfiguration().get(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID)); - getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_SECRET, getConfiguration() - .get(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET)); - getRawConfiguration() - .setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, - false); - FileSystem fs = FileSystem.newInstance(getRawConfiguration()); - getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_ID, orgClientId); - getRawConfiguration().set(FS_AZURE_BLOB_FS_CLIENT_SECRET, orgClientSecret); - getRawConfiguration() - .setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, - orgCreateFileSystemDurungInit); - this.testUserFs = fs; + checkIfConfigIsSet(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT + + "." + getAccountName()); + Configuration conf = getRawConfiguration(); + setTestFsConf(FS_AZURE_BLOB_FS_CLIENT_ID, + FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID); + setTestFsConf(FS_AZURE_BLOB_FS_CLIENT_SECRET, + FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET); + conf.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.OAuth.name()); + conf.set(FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME + "." + + getAccountName(), ClientCredsTokenProvider.class.getName()); + conf.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, + false); + this.testUserFs = FileSystem.newInstance(getRawConfiguration()); + } + + private void setTestFsConf(final String fsConfKey, + final String testFsConfKey) { + final String confKeyWithAccountName = fsConfKey + "." 
+ getAccountName(); + final String confValue = getConfiguration() + .getString(testFsConfKey, ""); + getRawConfiguration().set(confKeyWithAccountName, confValue); } @Test(expected = IllegalArgumentException.class) @@ -100,15 +115,17 @@ public void testCheckAccessWithNullPath() throws IOException { @Test(expected = NullPointerException.class) public void testCheckAccessForFileWithNullFsAction() throws Exception { - assumeHNSAndCheckAccessEnabled(); + Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false", + isHNSEnabled); + Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false", + isCheckAccessEnabled); // NPE when trying to convert null FsAction enum superUserFs.access(new Path("test.txt"), null); } @Test(expected = FileNotFoundException.class) public void testCheckAccessForNonExistentFile() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path nonExistentFile = setupTestDirectoryAndUserAccess( "/nonExistentFile1.txt", FsAction.ALL); superUserFs.delete(nonExistentFile, true); @@ -153,15 +170,38 @@ public void testCheckAccessForAccountWithoutNS() throws Exception { getConfiguration() .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, true)); Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false", - isCheckAccessEnabled); + isCheckAccessEnabled); + checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID); + checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET); + checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID); + setTestUserFs(); + + // When the driver does not know if the account is HNS enabled or not it + // makes a server call and fails + intercept(AccessControlException.class, + "\"This request is not authorized to perform this operation using " + + "this permission.\", 403", + () -> testUserFs.access(new Path("/"), FsAction.READ)); + + // When the driver has already determined if the account is HNS enabled + // or not, and as the account is non HNS the AzureBlobFileSystem#access + // acts as noop + AzureBlobFileSystemStore mockAbfsStore = + Mockito.mock(AzureBlobFileSystemStore.class); + Mockito.when(mockAbfsStore.getIsNamespaceEnabled()).thenReturn(true); + Field abfsStoreField = AzureBlobFileSystem.class.getDeclaredField( + "abfsStore"); + abfsStoreField.setAccessible(true); + abfsStoreField.set(testUserFs, mockAbfsStore); testUserFs.access(new Path("/"), FsAction.READ); + + superUserFs.access(new Path("/"), FsAction.READ); } @Test public void testFsActionNONE() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test2.txt", FsAction.NONE); assertInaccessible(testFilePath, FsAction.EXECUTE); @@ -175,8 +215,7 @@ public void testFsActionNONE() throws Exception { @Test public void testFsActionEXECUTE() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test3.txt", FsAction.EXECUTE); assertAccessible(testFilePath, FsAction.EXECUTE); @@ -191,8 +230,7 @@ public void testFsActionEXECUTE() throws Exception { @Test public void testFsActionREAD() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test4.txt", FsAction.READ); assertAccessible(testFilePath, FsAction.READ); @@ -207,8 +245,7 @@ public void testFsActionREAD() throws Exception { @Test public void testFsActionWRITE() throws 
Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test5.txt", FsAction.WRITE); assertAccessible(testFilePath, FsAction.WRITE); @@ -223,8 +260,7 @@ public void testFsActionWRITE() throws Exception { @Test public void testFsActionREADEXECUTE() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test6.txt", FsAction.READ_EXECUTE); assertAccessible(testFilePath, FsAction.EXECUTE); @@ -239,8 +275,7 @@ public void testFsActionREADEXECUTE() throws Exception { @Test public void testFsActionWRITEEXECUTE() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test7.txt", FsAction.WRITE_EXECUTE); assertAccessible(testFilePath, FsAction.EXECUTE); @@ -255,8 +290,7 @@ public void testFsActionWRITEEXECUTE() throws Exception { @Test public void testFsActionALL() throws Exception { - assumeHNSAndCheckAccessEnabled(); - setTestUserFs(); + checkPrerequisites(); Path testFilePath = setupTestDirectoryAndUserAccess("/test8.txt", FsAction.ALL); assertAccessible(testFilePath, FsAction.EXECUTE); @@ -268,11 +302,22 @@ public void testFsActionALL() throws Exception { assertAccessible(testFilePath, FsAction.ALL); } - private void assumeHNSAndCheckAccessEnabled() { + private void checkPrerequisites() throws Exception { + setTestUserFs(); Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false", isHNSEnabled); Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false", isCheckAccessEnabled); + checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID); + checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET); + checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID); + } + + private void checkIfConfigIsSet(String configKey){ + AbfsConfiguration conf = getConfiguration(); + String value = conf.get(configKey); + Preconditions.checkArgument((value != null && value.trim().length() > 1), + configKey + " config is mandatory for the test to run"); } private void assertAccessible(Path testFilePath, FsAction fsAction) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index 94368a4f36955..09304d1ec218d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -21,18 +21,44 @@ import java.io.FileNotFoundException; import java.io.FilterOutputStream; import java.io.IOException; +import java.lang.reflect.Field; import java.util.EnumSet; +import java.util.UUID; import org.junit.Test; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import 
org.apache.hadoop.fs.azurebfs.contracts.exceptions.ConcurrentWriteOperationDetectedException; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; + +import static java.net.HttpURLConnection.HTTP_CONFLICT; +import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; /** * Test create operation. @@ -145,15 +171,19 @@ public void testTryWithResources() throws Throwable { out.hsync(); fail("Expected a failure"); } catch (FileNotFoundException fnfe) { - // the exception raised in close() must be in the caught exception's - // suppressed list - Throwable[] suppressed = fnfe.getSuppressed(); - assertEquals("suppressed count", 1, suppressed.length); - Throwable inner = suppressed[0]; - if (!(inner instanceof IOException)) { - throw inner; + //appendblob outputStream does not generate suppressed exception on close as it is + //single threaded code + if (!fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testPath).toString())) { + // the exception raised in close() must be in the caught exception's + // suppressed list + Throwable[] suppressed = fnfe.getSuppressed(); + assertEquals("suppressed count", 1, suppressed.length); + Throwable inner = suppressed[0]; + if (!(inner instanceof IOException)) { + throw inner; + } + GenericTestUtils.assertExceptionContains(fnfe.getMessage(), inner); } - GenericTestUtils.assertExceptionContains(fnfe.getMessage(), inner); } } @@ -184,4 +214,257 @@ public void testFilterFSWriteAfterClose() throws Throwable { }); } + /** + * Tests if the number of connections made for: + * 1. create overwrite=false of a file that doesnt pre-exist + * 2. create overwrite=false of a file that pre-exists + * 3. create overwrite=true of a file that doesnt pre-exist + * 4. 
create overwrite=true of a file that pre-exists + * matches the expectation when run against both combinations of + * fs.azure.enable.conditional.create.overwrite=true and + * fs.azure.enable.conditional.create.overwrite=false + * @throws Throwable + */ + @Test + public void testDefaultCreateOverwriteFileTest() throws Throwable { + testCreateFileOverwrite(true); + testCreateFileOverwrite(false); + } + + public void testCreateFileOverwrite(boolean enableConditionalCreateOverwrite) + throws Throwable { + final AzureBlobFileSystem currentFs = getFileSystem(); + Configuration config = new Configuration(this.getRawConfiguration()); + config.set("fs.azure.enable.conditional.create.overwrite", + Boolean.toString(enableConditionalCreateOverwrite)); + + final AzureBlobFileSystem fs = + (AzureBlobFileSystem) FileSystem.newInstance(currentFs.getUri(), + config); + + long totalConnectionMadeBeforeTest = fs.getInstrumentationMap() + .get(CONNECTIONS_MADE.getStatName()); + + int createRequestCount = 0; + final Path nonOverwriteFile = new Path("/NonOverwriteTest_FileName_" + + UUID.randomUUID().toString()); + + // Case 1: Not Overwrite - File does not pre-exist + // create should be successful + fs.create(nonOverwriteFile, false); + + // One request to server to create path should be issued + createRequestCount++; + + assertAbfsStatistics( + CONNECTIONS_MADE, + totalConnectionMadeBeforeTest + createRequestCount, + fs.getInstrumentationMap()); + + // Case 2: Not Overwrite - File pre-exists + intercept(FileAlreadyExistsException.class, + () -> fs.create(nonOverwriteFile, false)); + + // One request to server to create path should be issued + createRequestCount++; + + assertAbfsStatistics( + CONNECTIONS_MADE, + totalConnectionMadeBeforeTest + createRequestCount, + fs.getInstrumentationMap()); + + final Path overwriteFilePath = new Path("/OverwriteTest_FileName_" + + UUID.randomUUID().toString()); + + // Case 3: Overwrite - File does not pre-exist + // create should be successful + fs.create(overwriteFilePath, true); + + // One request to server to create path should be issued + createRequestCount++; + + assertAbfsStatistics( + CONNECTIONS_MADE, + totalConnectionMadeBeforeTest + createRequestCount, + fs.getInstrumentationMap()); + + // Case 4: Overwrite - File pre-exists + fs.create(overwriteFilePath, true); + + if (enableConditionalCreateOverwrite) { + // Three requests will be sent to server to create path, + // 1. create without overwrite + // 2. GetFileStatus to get eTag + // 3. create with overwrite + createRequestCount += 3; + } else { + createRequestCount++; + } + + assertAbfsStatistics( + CONNECTIONS_MADE, + totalConnectionMadeBeforeTest + createRequestCount, + fs.getInstrumentationMap()); + } + + /** + * Test negative scenarios with Create overwrite=false as default + * With create overwrite=true ending in 3 calls: + * A. Create overwrite=false + * B. GFS + * C. 
Create overwrite=true + * + * Scn1: A fails with HTTP409, leading to B which fails with HTTP404, + * detect parallel access + * Scn2: A fails with HTTP409, leading to B which fails with HTTP500, + * fail create with HTTP500 + * Scn3: A fails with HTTP409, leading to B and then C, + * which fails with HTTP412, detect parallel access + * Scn4: A fails with HTTP409, leading to B and then C, + * which fails with HTTP500, fail create with HTTP500 + * Scn5: A fails with HTTP500, fail create with HTTP500 + */ + @Test + public void testNegativeScenariosForCreateOverwriteDisabled() + throws Throwable { + + final AzureBlobFileSystem currentFs = getFileSystem(); + Configuration config = new Configuration(this.getRawConfiguration()); + config.set("fs.azure.enable.conditional.create.overwrite", + Boolean.toString(true)); + + final AzureBlobFileSystem fs = + (AzureBlobFileSystem) FileSystem.newInstance(currentFs.getUri(), + config); + + // Get mock AbfsClient with current config + AbfsClient + mockClient + = TestAbfsClient.getMockAbfsClient( + fs.getAbfsStore().getClient(), + fs.getAbfsStore().getAbfsConfiguration()); + + AzureBlobFileSystemStore abfsStore = fs.getAbfsStore(); + abfsStore = setAzureBlobSystemStoreField(abfsStore, "client", mockClient); + boolean isNamespaceEnabled = abfsStore.getIsNamespaceEnabled(); + + AbfsRestOperation successOp = mock( + AbfsRestOperation.class); + AbfsHttpOperation http200Op = mock( + AbfsHttpOperation.class); + when(http200Op.getStatusCode()).thenReturn(HTTP_OK); + when(successOp.getResult()).thenReturn(http200Op); + + AbfsRestOperationException conflictResponseEx + = getMockAbfsRestOperationException(HTTP_CONFLICT); + AbfsRestOperationException serverErrorResponseEx + = getMockAbfsRestOperationException(HTTP_INTERNAL_ERROR); + AbfsRestOperationException fileNotFoundResponseEx + = getMockAbfsRestOperationException(HTTP_NOT_FOUND); + AbfsRestOperationException preConditionResponseEx + = getMockAbfsRestOperationException(HTTP_PRECON_FAILED); + + // mock for overwrite=false + doThrow(conflictResponseEx) // Scn1: GFS fails with Http404 + .doThrow(conflictResponseEx) // Scn2: GFS fails with Http500 + .doThrow( + conflictResponseEx) // Scn3: create overwrite=true fails with Http412 + .doThrow( + conflictResponseEx) // Scn4: create overwrite=true fails with Http500 + .doThrow( + serverErrorResponseEx) // Scn5: create overwrite=false fails with Http500 + .when(mockClient) + .createPath(any(String.class), eq(true), eq(false), + isNamespaceEnabled ? any(String.class) : eq(null), + isNamespaceEnabled ? any(String.class) : eq(null), + any(boolean.class), eq(null)); + + doThrow(fileNotFoundResponseEx) // Scn1: GFS fails with Http404 + .doThrow(serverErrorResponseEx) // Scn2: GFS fails with Http500 + .doReturn(successOp) // Scn3: create overwrite=true fails with Http412 + .doReturn(successOp) // Scn4: create overwrite=true fails with Http500 + .when(mockClient) + .getPathStatus(any(String.class), eq(false)); + + // mock for overwrite=true + doThrow( + preConditionResponseEx) // Scn3: create overwrite=true fails with Http412 + .doThrow( + serverErrorResponseEx) // Scn4: create overwrite=true fails with Http500 + .when(mockClient) + .createPath(any(String.class), eq(true), eq(true), + isNamespaceEnabled ? any(String.class) : eq(null), + isNamespaceEnabled ? any(String.class) : eq(null), + any(boolean.class), eq(null)); + + // Scn1: GFS fails with Http404 + // Sequence of events expected: + // 1. create overwrite=false - fail with conflict + // 2. 
GFS - fail with File Not found + // Create will fail with ConcurrentWriteOperationDetectedException + validateCreateFileException(ConcurrentWriteOperationDetectedException.class, + abfsStore); + + // Scn2: GFS fails with Http500 + // Sequence of events expected: + // 1. create overwrite=false - fail with conflict + // 2. GFS - fail with Server error + // Create will fail with 500 + validateCreateFileException(AbfsRestOperationException.class, abfsStore); + + // Scn3: create overwrite=true fails with Http412 + // Sequence of events expected: + // 1. create overwrite=false - fail with conflict + // 2. GFS - pass + // 3. create overwrite=true - fail with Pre-Condition + // Create will fail with ConcurrentWriteOperationDetectedException + validateCreateFileException(ConcurrentWriteOperationDetectedException.class, + abfsStore); + + // Scn4: create overwrite=true fails with Http500 + // Sequence of events expected: + // 1. create overwrite=false - fail with conflict + // 2. GFS - pass + // 3. create overwrite=true - fail with Server error + // Create will fail with 500 + validateCreateFileException(AbfsRestOperationException.class, abfsStore); + + // Scn5: create overwrite=false fails with Http500 + // Sequence of events expected: + // 1. create overwrite=false - fail with server error + // Create will fail with 500 + validateCreateFileException(AbfsRestOperationException.class, abfsStore); + } + + private AzureBlobFileSystemStore setAzureBlobSystemStoreField( + final AzureBlobFileSystemStore abfsStore, + final String fieldName, + Object fieldObject) throws Exception { + + Field abfsClientField = AzureBlobFileSystemStore.class.getDeclaredField( + fieldName); + abfsClientField.setAccessible(true); + Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(abfsClientField, + abfsClientField.getModifiers() & ~java.lang.reflect.Modifier.FINAL); + abfsClientField.set(abfsStore, fieldObject); + return abfsStore; + } + + private void validateCreateFileException(final Class exceptionClass, final AzureBlobFileSystemStore abfsStore) + throws Exception { + FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, + FsAction.ALL); + FsPermission umask = new FsPermission(FsAction.NONE, FsAction.NONE, + FsAction.NONE); + Path testPath = new Path("testFile"); + intercept( + exceptionClass, + () -> abfsStore.createFile(testPath, null, true, permission, umask)); + } + + private AbfsRestOperationException getMockAbfsRestOperationException(int status) { + return new AbfsRestOperationException(status, "", "", new Exception()); + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java new file mode 100644 index 0000000000000..50ce257b4a844 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -0,0 +1,477 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.AccessDeniedException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +import org.assertj.core.api.Assertions; +import org.junit.Assume; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; +import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; +import org.apache.hadoop.fs.azurebfs.extensions.MockDelegationSASTokenProvider; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH; +import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry; +import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; +import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; +import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; +import static org.apache.hadoop.fs.permission.AclEntryType.USER; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Test Perform Authorization Check operation + */ +public class ITestAzureBlobFileSystemDelegationSAS extends AbstractAbfsIntegrationTest { + private static final String TEST_GROUP = UUID.randomUUID().toString(); + + private static final Logger LOG = + LoggerFactory.getLogger(ITestAzureBlobFileSystemDelegationSAS.class); + + public ITestAzureBlobFileSystemDelegationSAS() throws Exception { + // These tests rely on specific settings in azure-auth-keys.xml: + String sasProvider = getRawConfiguration().get(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE); + Assume.assumeTrue(MockDelegationSASTokenProvider.class.getCanonicalName().equals(sasProvider)); + Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_ID)); + Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SECRET)); + Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID)); + Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID)); + // The test uses shared key to create 
a random filesystem and then creates another + // instance of this filesystem using SAS authorization. + Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); + } + + @Override + public void setup() throws Exception { + boolean isHNSEnabled = this.getConfiguration().getBoolean( + TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); + Assume.assumeTrue(isHNSEnabled); + createFilesystemForSASTests(); + super.setup(); + } + + @Test + // Test filesystem operations access, create, mkdirs, setOwner, getFileStatus + public void testCheckAccess() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + + Path rootPath = new Path("/"); + fs.setOwner(rootPath, MockDelegationSASTokenProvider.TEST_OWNER, null); + fs.setPermission(rootPath, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.EXECUTE)); + FileStatus rootStatus = fs.getFileStatus(rootPath); + assertEquals("The directory permissions are not expected.", "rwxr-x--x", rootStatus.getPermission().toString()); + assertEquals("The directory owner is not expected.", + MockDelegationSASTokenProvider.TEST_OWNER, + rootStatus.getOwner()); + + Path dirPath = new Path(UUID.randomUUID().toString()); + fs.mkdirs(dirPath); + + Path filePath = new Path(dirPath, "file1"); + fs.create(filePath).close(); + fs.setPermission(filePath, new FsPermission(FsAction.READ, FsAction.READ, FsAction.NONE)); + + FileStatus dirStatus = fs.getFileStatus(dirPath); + FileStatus fileStatus = fs.getFileStatus(filePath); + + assertEquals("The owner is not expected.", MockDelegationSASTokenProvider.TEST_OWNER, dirStatus.getOwner()); + assertEquals("The owner is not expected.", MockDelegationSASTokenProvider.TEST_OWNER, fileStatus.getOwner()); + assertEquals("The directory permissions are not expected.", "rwxr-xr-x", dirStatus.getPermission().toString()); + assertEquals("The file permissions are not expected.", "r--r-----", fileStatus.getPermission().toString()); + + assertTrue(isAccessible(fs, dirPath, FsAction.READ_WRITE)); + assertFalse(isAccessible(fs, filePath, FsAction.READ_WRITE)); + + fs.setPermission(filePath, new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE)); + fileStatus = fs.getFileStatus(filePath); + assertEquals("The file permissions are not expected.", "rw-r-----", fileStatus.getPermission().toString()); + assertTrue(isAccessible(fs, filePath, FsAction.READ_WRITE)); + + fs.setPermission(dirPath, new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE)); + dirStatus = fs.getFileStatus(dirPath); + assertEquals("The file permissions are not expected.", "--x------", dirStatus.getPermission().toString()); + assertFalse(isAccessible(fs, dirPath, FsAction.READ_WRITE)); + assertTrue(isAccessible(fs, dirPath, FsAction.EXECUTE)); + + fs.setPermission(dirPath, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE)); + dirStatus = fs.getFileStatus(dirPath); + assertEquals("The file permissions are not expected.", "---------", dirStatus.getPermission().toString()); + assertFalse(isAccessible(fs, filePath, FsAction.READ_WRITE)); + } + + private boolean isAccessible(FileSystem fs, Path path, FsAction fsAction) + throws IOException { + try { + fs.access(path, fsAction); + } catch (AccessControlException ace) { + return false; + } + return true; + } + + @Test + // Test filesystem operations create, create with overwrite, append and open. 
+ // Test output stream operation write, flush and close + // Test input stream operation, read + public void testReadAndWrite() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path reqPath = new Path(UUID.randomUUID().toString()); + + final String msg1 = "purple"; + final String msg2 = "yellow"; + int expectedFileLength = msg1.length() * 2; + + byte[] readBuffer = new byte[1024]; + + // create file with content "purplepurple" + try (FSDataOutputStream stream = fs.create(reqPath)) { + stream.writeBytes(msg1); + stream.hflush(); + stream.writeBytes(msg1); + } + + // open file and verify content is "purplepurple" + try (FSDataInputStream stream = fs.open(reqPath)) { + int bytesRead = stream.read(readBuffer, 0, readBuffer.length); + assertEquals(expectedFileLength, bytesRead); + String fileContent = new String(readBuffer, 0, bytesRead, StandardCharsets.UTF_8); + assertEquals(msg1 + msg1, fileContent); + } + + // overwrite file with content "yellowyellow" + try (FSDataOutputStream stream = fs.create(reqPath)) { + stream.writeBytes(msg2); + stream.hflush(); + stream.writeBytes(msg2); + } + + // open file and verify content is "yellowyellow" + try (FSDataInputStream stream = fs.open(reqPath)) { + int bytesRead = stream.read(readBuffer, 0, readBuffer.length); + assertEquals(expectedFileLength, bytesRead); + String fileContent = new String(readBuffer, 0, bytesRead, StandardCharsets.UTF_8); + assertEquals(msg2 + msg2, fileContent); + } + + // append to file so final content is "yellowyellowpurplepurple" + try (FSDataOutputStream stream = fs.append(reqPath)) { + stream.writeBytes(msg1); + stream.hflush(); + stream.writeBytes(msg1); + } + + // open file and verify content is "yellowyellowpurplepurple" + try (FSDataInputStream stream = fs.open(reqPath)) { + int bytesRead = stream.read(readBuffer, 0, readBuffer.length); + assertEquals(2 * expectedFileLength, bytesRead); + String fileContent = new String(readBuffer, 0, bytesRead, StandardCharsets.UTF_8); + assertEquals(msg2 + msg2 + msg1 + msg1, fileContent); + } + } + + @Test + // Test rename file and rename folder + public void testRename() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path sourceDir = new Path(UUID.randomUUID().toString()); + Path sourcePath = new Path(sourceDir, UUID.randomUUID().toString()); + Path destinationPath = new Path(sourceDir, UUID.randomUUID().toString()); + Path destinationDir = new Path(UUID.randomUUID().toString()); + + // create file with content "hello" + try (FSDataOutputStream stream = fs.create(sourcePath)) { + stream.writeBytes("hello"); + } + + assertFalse(fs.exists(destinationPath)); + fs.rename(sourcePath, destinationPath); + assertFalse(fs.exists(sourcePath)); + assertTrue(fs.exists(destinationPath)); + + assertFalse(fs.exists(destinationDir)); + fs.rename(sourceDir, destinationDir); + assertFalse(fs.exists(sourceDir)); + assertTrue(fs.exists(destinationDir)); + } + + @Test + // Test delete file and delete folder + public void testDelete() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path dirPath = new Path(UUID.randomUUID().toString()); + Path filePath = new Path(dirPath, UUID.randomUUID().toString()); + + // create file with content "hello" + try (FSDataOutputStream stream = fs.create(filePath)) { + stream.writeBytes("hello"); + } + + assertTrue(fs.exists(filePath)); + fs.delete(filePath, false); + assertFalse(fs.exists(filePath)); + + assertTrue(fs.exists(dirPath)); + fs.delete(dirPath, false); + assertFalse(fs.exists(dirPath)); + } 
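  /*
   * Illustrative sketch only (not part of this patch): roughly how a
   * delegation-SAS-authorized filesystem instance, like the one these tests
   * exercise, could be obtained from configuration. The config constants are
   * the ones referenced elsewhere in this patch; AuthType.SAS is assumed to
   * be the matching auth type, Configuration and
   * FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME would need to be imported, and
   * the actual setup performed by createFilesystemForSASTests() may differ.
   */
  private AzureBlobFileSystem newSasAuthorizedFsSketch() throws Exception {
    Configuration sasConf = new Configuration(getRawConfiguration());
    // Plug in the provider that issues user delegation SAS tokens.
    sasConf.set(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE,
        MockDelegationSASTokenProvider.class.getName());
    // Switch the account over to SAS authentication (assumed enum value).
    sasConf.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SAS.name());
    return (AzureBlobFileSystem) FileSystem.newInstance(
        getFileSystem().getUri(), sasConf);
  }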
+ + @Test + // Test delete folder recursive + public void testDeleteRecursive() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path dirPath = new Path(UUID.randomUUID().toString()); + Path filePath = new Path(dirPath, UUID.randomUUID().toString()); + + // create file with content "hello" + try (FSDataOutputStream stream = fs.create(filePath)) { + stream.writeBytes("hello"); + } + + assertTrue(fs.exists(dirPath)); + assertTrue(fs.exists(filePath)); + fs.delete(dirPath, true); + assertFalse(fs.exists(filePath)); + assertFalse(fs.exists(dirPath)); + } + + @Test + // Test list on file, directory and root path + public void testList() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path dirPath = new Path(UUID.randomUUID().toString()); + Path filePath = new Path(dirPath, UUID.randomUUID().toString()); + + fs.mkdirs(dirPath); + + // create file with content "hello" + try (FSDataOutputStream stream = fs.create(filePath)) { + stream.writeBytes("hello"); + } + + fs.listStatus(filePath); + fs.listStatus(dirPath); + fs.listStatus(new Path("/")); + } + + @Test + // Test filesystem operations setAcl, getAclStatus, removeAcl + // setPermissions and getFileStatus + public void testAcl() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path reqPath = new Path(UUID.randomUUID().toString()); + + fs.create(reqPath).close(); + + fs.setAcl(reqPath, Arrays + .asList(aclEntry(ACCESS, GROUP, TEST_GROUP, FsAction.ALL))); + + AclStatus acl = fs.getAclStatus(reqPath); + assertEquals(MockDelegationSASTokenProvider.TEST_OWNER, acl.getOwner()); + assertEquals("[group::r--, group:" + TEST_GROUP + ":rwx]", acl.getEntries().toString()); + + fs.removeAcl(reqPath); + acl = fs.getAclStatus(reqPath); + assertEquals("[]", acl.getEntries().toString()); + + fs.setPermission(reqPath, + new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); + + FileStatus status = fs.getFileStatus(reqPath); + assertEquals("rwx------", status.getPermission().toString()); + + acl = fs.getAclStatus(reqPath); + assertEquals("rwx------", acl.getPermission().toString()); + } + + @Test + // Test getFileStatus and getAclStatus operations on root path + public void testRootPath() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path rootPath = new Path(AbfsHttpConstants.ROOT_PATH); + + fs.setOwner(rootPath, MockDelegationSASTokenProvider.TEST_OWNER, null); + FileStatus status = fs.getFileStatus(rootPath); + assertEquals("rwxr-x---", status.getPermission().toString()); + assertEquals(MockDelegationSASTokenProvider.TEST_OWNER, status.getOwner()); + assertTrue(status.isDirectory()); + + AclStatus acl = fs.getAclStatus(rootPath); + assertEquals("rwxr-x---", acl.getPermission().toString()); + + List aclSpec = new ArrayList<>(); + int count = 0; + for (AclEntry entry: acl.getEntries()) { + aclSpec.add(entry); + if (entry.getScope() == AclEntryScope.DEFAULT) { + count++; + } + } + assertEquals(0, count); + + aclSpec.add(aclEntry(DEFAULT, USER, "cd548981-afec-4ab9-9d39-f6f2add2fd9b", FsAction.EXECUTE)); + + fs.modifyAclEntries(rootPath, aclSpec); + + acl = fs.getAclStatus(rootPath); + + count = 0; + for (AclEntry entry: acl.getEntries()) { + aclSpec.add(entry); + if (entry.getScope() == AclEntryScope.DEFAULT) { + count++; + } + } + assertEquals(5, count); + + fs.removeDefaultAcl(rootPath); + + acl = fs.getAclStatus(rootPath); + + count = 0; + for (AclEntry entry: acl.getEntries()) { + aclSpec.add(entry); + if (entry.getScope() == AclEntryScope.DEFAULT) { + count++; + 
} + } + assertEquals(0, count); + } + + @Test + // Test filesystem operations getXAttr and setXAttr + public void testProperties() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path reqPath = new Path(UUID.randomUUID().toString()); + + fs.create(reqPath).close(); + + final String propertyName = "user.mime_type"; + final byte[] propertyValue = "text/plain".getBytes("utf-8"); + fs.setXAttr(reqPath, propertyName, propertyValue); + + assertArrayEquals(propertyValue, fs.getXAttr(reqPath, propertyName)); + } + + @Test + public void testSignatureMask() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + String src = "/testABC/test.xt"; + fs.create(new Path(src)); + AbfsRestOperation abfsHttpRestOperation = fs.getAbfsClient() + .renamePath(src, "/testABC" + "/abc.txt", null); + AbfsHttpOperation result = abfsHttpRestOperation.getResult(); + String url = result.getSignatureMaskedUrl(); + String encodedUrl = result.getSignatureMaskedEncodedUrl(); + Assertions.assertThat(url.substring(url.indexOf("sig="))) + .describedAs("Signature query param should be masked") + .startsWith("sig=XXXX"); + Assertions.assertThat(encodedUrl.substring(encodedUrl.indexOf("sig%3D"))) + .describedAs("Signature query param should be masked") + .startsWith("sig%3DXXXX"); + } + + @Test + public void testSignatureMaskOnExceptionMessage() throws Exception { + intercept(IOException.class, "sig=XXXX", + () -> getFileSystem().getAbfsClient() + .renamePath("testABC/test.xt", "testABC/abc.txt", null)); + } + + @Test + // SetPermission should fail when saoid is not the owner and succeed when it is. + public void testSetPermissionForNonOwner() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + + Path rootPath = new Path("/"); + FileStatus rootStatus = fs.getFileStatus(rootPath); + assertEquals("The permissions are not expected.", + "rwxr-x---", + rootStatus.getPermission().toString()); + assertNotEquals("The owner is not expected.", + MockDelegationSASTokenProvider.TEST_OWNER, + rootStatus.getOwner()); + + // Attempt to set permission without being the owner. + intercept(AccessDeniedException.class, + AUTHORIZATION_PERMISSION_MISS_MATCH.getErrorCode(), () -> { + fs.setPermission(rootPath, new FsPermission(FsAction.ALL, + FsAction.READ_EXECUTE, FsAction.EXECUTE)); + return "Set permission should fail because saoid is not the owner."; + }); + + // Attempt to set permission as the owner. + fs.setOwner(rootPath, MockDelegationSASTokenProvider.TEST_OWNER, null); + fs.setPermission(rootPath, new FsPermission(FsAction.ALL, + FsAction.READ_EXECUTE, FsAction.EXECUTE)); + rootStatus = fs.getFileStatus(rootPath); + assertEquals("The permissions are not expected.", + "rwxr-x--x", + rootStatus.getPermission().toString()); + assertEquals("The directory owner is not expected.", + MockDelegationSASTokenProvider.TEST_OWNER, + rootStatus.getOwner()); + } + + @Test + // Without saoid or suoid, setPermission should succeed with sp=p for a non-owner. 
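  // (saoid/suoid refer to the signed authorized/unauthorized object id query
  // parameters of a user delegation SAS, and sp=p to a token whose signed
  // permissions include the set-permissions right.)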
+ public void testSetPermissionWithoutAgentForNonOwner() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path path = new Path(MockDelegationSASTokenProvider.NO_AGENT_PATH); + fs.create(path).close(); + + FileStatus status = fs.getFileStatus(path); + assertEquals("The permissions are not expected.", + "rw-r--r--", + status.getPermission().toString()); + assertNotEquals("The owner is not expected.", + TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID, + status.getOwner()); + + fs.setPermission(path, new FsPermission(FsAction.READ, FsAction.READ, FsAction.NONE)); + + FileStatus fileStatus = fs.getFileStatus(path); + assertEquals("The permissions are not expected.", + "r--r-----", + fileStatus.getPermission().toString()); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java index 486daca4f1120..9bd82dbb03df6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java @@ -26,22 +26,48 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.assertj.core.api.Assertions; +import org.junit.Assume; import org.junit.Test; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient; +import org.apache.hadoop.fs.azurebfs.services.TestAbfsPerfTracker; +import org.apache.hadoop.fs.azurebfs.utils.TestMockHelpers; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.HTTP_METHOD_DELETE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_DELETE_CONSIDERED_IDEMPOTENT; +import static org.apache.hadoop.fs.azurebfs.services.AbfsRestOperationType.DeletePath; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertDeleted; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; import static org.apache.hadoop.test.LambdaTestUtils.intercept; + /** * Test delete operation. 
*/ public class ITestAzureBlobFileSystemDelete extends AbstractAbfsIntegrationTest { + private static final int REDUCED_RETRY_COUNT = 1; + private static final int REDUCED_MAX_BACKOFF_INTERVALS_MS = 5000; + public ITestAzureBlobFileSystemDelete() throws Exception { super(); } @@ -130,4 +156,117 @@ public Void call() throws Exception { assertPathDoesNotExist(fs, "deleted", dir); } + + @Test + public void testDeleteIdempotency() throws Exception { + Assume.assumeTrue(DEFAULT_DELETE_CONSIDERED_IDEMPOTENT); + // Config to reduce the retry and maxBackoff time for test run + AbfsConfiguration abfsConfig + = TestAbfsConfigurationFieldsValidation.updateRetryConfigs( + getConfiguration(), + REDUCED_RETRY_COUNT, REDUCED_MAX_BACKOFF_INTERVALS_MS); + + final AzureBlobFileSystem fs = getFileSystem(); + AbfsClient abfsClient = fs.getAbfsStore().getClient(); + AbfsClient testClient = TestAbfsClient.createTestClientFromCurrentContext( + abfsClient, + abfsConfig); + + // Mock instance of AbfsRestOperation + AbfsRestOperation op = mock(AbfsRestOperation.class); + // Set retryCount to non-zero + when(op.isARetriedRequest()).thenReturn(true); + + // Case 1: Mock instance of Http Operation response. This will return + // HTTP:Not Found + AbfsHttpOperation http404Op = mock(AbfsHttpOperation.class); + when(http404Op.getStatusCode()).thenReturn(HTTP_NOT_FOUND); + + // Mock delete response to 404 + when(op.getResult()).thenReturn(http404Op); + + Assertions.assertThat(testClient.deleteIdempotencyCheckOp(op) + .getResult() + .getStatusCode()) + .describedAs( + "Delete is considered idempotent by default and should return success.") + .isEqualTo(HTTP_OK); + + // Case 2: Mock instance of Http Operation response. This will return + // HTTP:Bad Request + AbfsHttpOperation http400Op = mock(AbfsHttpOperation.class); + when(http400Op.getStatusCode()).thenReturn(HTTP_BAD_REQUEST); + + // Mock delete response to 400 + when(op.getResult()).thenReturn(http400Op); + + Assertions.assertThat(testClient.deleteIdempotencyCheckOp(op) + .getResult() + .getStatusCode()) + .describedAs( + "Idempotency check to happen only for HTTP 404 response.") + .isEqualTo(HTTP_BAD_REQUEST); + + } + + @Test + public void testDeleteIdempotencyTriggerHttp404() throws Exception { + + final AzureBlobFileSystem fs = getFileSystem(); + AbfsClient client = TestAbfsClient.createTestClientFromCurrentContext( + fs.getAbfsStore().getClient(), + this.getConfiguration()); + + // Case 1: Not a retried case should throw error back + // Add asserts at AzureBlobFileSystemStore and AbfsClient levels + intercept(AbfsRestOperationException.class, + () -> fs.getAbfsStore().delete( + new Path("/NonExistingPath"), + false)); + + intercept(AbfsRestOperationException.class, + () -> client.deletePath( + "/NonExistingPath", + false, + null)); + + // mock idempotency check to mimic retried case + AbfsClient mockClient = TestAbfsClient.getMockAbfsClient( + fs.getAbfsStore().getClient(), + this.getConfiguration()); + AzureBlobFileSystemStore mockStore = mock(AzureBlobFileSystemStore.class); + mockStore = TestMockHelpers.setClassField(AzureBlobFileSystemStore.class, mockStore, + "client", mockClient); + mockStore = TestMockHelpers.setClassField(AzureBlobFileSystemStore.class, + mockStore, + "abfsPerfTracker", + TestAbfsPerfTracker.getAPerfTrackerInstance(this.getConfiguration())); + doCallRealMethod().when(mockStore).delete(new Path("/NonExistingPath"), false); + + // Case 2: Mimic retried case + // Idempotency check on Delete always returns success + AbfsRestOperation 
idempotencyRetOp = TestAbfsClient.getRestOp( + DeletePath, mockClient, HTTP_METHOD_DELETE, + TestAbfsClient.getTestUrl(mockClient, "/NonExistingPath"), + TestAbfsClient.getTestRequestHeaders(mockClient)); + idempotencyRetOp.hardSetResult(HTTP_OK); + + doReturn(idempotencyRetOp).when(mockClient).deleteIdempotencyCheckOp(any()); + when(mockClient.deletePath("/NonExistingPath", false, + null)).thenCallRealMethod(); + + Assertions.assertThat(mockClient.deletePath( + "/NonExistingPath", + false, + null) + .getResult() + .getStatusCode()) + .describedAs("Idempotency check reports successful " + + "delete. 200OK should be returned") + .isEqualTo(idempotencyRetOp.getResult().getStatusCode()); + + // Call from AzureBlobFileSystemStore should not fail either + mockStore.delete(new Path("/NonExistingPath"), false); + } + } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index ebc9c07e53e59..05c3855f5c89d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -203,6 +203,9 @@ public void testWriteWithFileNotFoundException() throws Exception { public void testFlushWithFileNotFoundException() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); final Path testFilePath = new Path(methodName.getMethodName()); + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + return; + } FSDataOutputStream stream = fs.create(testFilePath); assertTrue(fs.exists(testFilePath)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index 60f7f7d23f02a..ec33257060278 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -41,6 +41,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasStreamCapabilities; +import static org.apache.hadoop.fs.contract.ContractTestUtils.assertLacksStreamCapabilities; + /** * Test flush operation. * This class cannot be run in parallel test mode--check comments in @@ -49,7 +52,8 @@ public class ITestAzureBlobFileSystemFlush extends AbstractAbfsScaleTest { private static final int BASE_SIZE = 1024; private static final int ONE_THOUSAND = 1000; - private static final int TEST_BUFFER_SIZE = 5 * ONE_THOUSAND * BASE_SIZE; + //3000 KB to support appenblob too + private static final int TEST_BUFFER_SIZE = 3 * ONE_THOUSAND * BASE_SIZE; private static final int ONE_MB = 1024 * 1024; private static final int FLUSH_TIMES = 200; private static final int THREAD_SLEEP_TIME = 1000; @@ -226,11 +230,15 @@ private void testFlush(boolean disableOutputStreamFlush) throws Exception { final Path testFilePath = path(methodName.getMethodName()); byte[] buffer = getRandomBytesArray(); - // The test case must write "fs.azure.write.request.size" bytes // to the stream in order for the data to be uploaded to storage. 
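// ---------------------------------------------------------------------------
// A short worked note on the TEST_BUFFER_SIZE change above. The 4 MiB figure
// is the documented per-append block limit for Azure append blobs; it is an
// assumption of this note, not something asserted by the test itself:
//
//   old: TEST_BUFFER_SIZE = 5 * 1000 * 1024 bytes ~ 5000 KB  (> 4096 KB)
//   new: TEST_BUFFER_SIZE = 3 * 1000 * 1024 bytes ~ 3000 KB  (< 4096 KB)
//
// Keeping a single client write below that limit lets the same flush tests
// run unchanged against append-blob backed paths, which is also why the exact
// equality check that follows is relaxed to writeBufferSize <= buffer.length.
// ---------------------------------------------------------------------------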
- assertEquals(fs.getAbfsStore().getAbfsConfiguration().getWriteBufferSize(), - buffer.length); + assertTrue(fs.getAbfsStore().getAbfsConfiguration().getWriteBufferSize() + <= buffer.length); + + boolean isAppendBlob = true; + if (!fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + isAppendBlob = false; + } try (FSDataOutputStream stream = fs.create(testFilePath)) { stream.write(buffer); @@ -245,7 +253,8 @@ private void testFlush(boolean disableOutputStreamFlush) throws Exception { // Verify that the data can be read if disableOutputStreamFlush is // false; and otherwise cannot be read. - validate(fs.open(testFilePath), buffer, !disableOutputStreamFlush); + /* For Appendlob flush is not needed to update data on server */ + validate(fs.open(testFilePath), buffer, !disableOutputStreamFlush || isAppendBlob); } } @@ -267,10 +276,15 @@ public void testHflushWithFlushDisabled() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); byte[] buffer = getRandomBytesArray(); final Path testFilePath = path(methodName.getMethodName()); + boolean isAppendBlob = false; + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + isAppendBlob = true; + } try (FSDataOutputStream stream = getStreamAfterWrite(fs, testFilePath, buffer, false)) { stream.hflush(); - validate(fs, testFilePath, buffer, false); + /* For Appendlob flush is not needed to update data on server */ + validate(fs, testFilePath, buffer, isAppendBlob); } } @@ -295,11 +309,12 @@ public void testStreamCapabilitiesWithFlushDisabled() throws Exception { final Path testFilePath = path(methodName.getMethodName()); try (FSDataOutputStream stream = getStreamAfterWrite(fs, testFilePath, buffer, false)) { - assertFalse(stream.hasCapability(StreamCapabilities.HFLUSH)); - assertFalse(stream.hasCapability(StreamCapabilities.HSYNC)); - assertFalse(stream.hasCapability(StreamCapabilities.DROPBEHIND)); - assertFalse(stream.hasCapability(StreamCapabilities.READAHEAD)); - assertFalse(stream.hasCapability(StreamCapabilities.UNBUFFER)); + assertLacksStreamCapabilities(stream, + StreamCapabilities.HFLUSH, + StreamCapabilities.HSYNC, + StreamCapabilities.DROPBEHIND, + StreamCapabilities.READAHEAD, + StreamCapabilities.UNBUFFER); } } @@ -309,11 +324,13 @@ public void testStreamCapabilitiesWithFlushEnabled() throws Exception { byte[] buffer = getRandomBytesArray(); final Path testFilePath = path(methodName.getMethodName()); try (FSDataOutputStream stream = getStreamAfterWrite(fs, testFilePath, buffer, true)) { - assertTrue(stream.hasCapability(StreamCapabilities.HFLUSH)); - assertTrue(stream.hasCapability(StreamCapabilities.HSYNC)); - assertFalse(stream.hasCapability(StreamCapabilities.DROPBEHIND)); - assertFalse(stream.hasCapability(StreamCapabilities.READAHEAD)); - assertFalse(stream.hasCapability(StreamCapabilities.UNBUFFER)); + assertHasStreamCapabilities(stream, + StreamCapabilities.HFLUSH, + StreamCapabilities.HSYNC); + assertLacksStreamCapabilities(stream, + StreamCapabilities.DROPBEHIND, + StreamCapabilities.READAHEAD, + StreamCapabilities.UNBUFFER); } } @@ -322,9 +339,14 @@ public void testHsyncWithFlushDisabled() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); byte[] buffer = getRandomBytesArray(); final Path testFilePath = path(methodName.getMethodName()); + boolean isAppendBlob = false; + if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(testFilePath).toString())) { + isAppendBlob = true; + } try (FSDataOutputStream stream = getStreamAfterWrite(fs, 
testFilePath, buffer, false)) { stream.hsync(); - validate(fs, testFilePath, buffer, false); + /* For Appendlob flush is not needed to update data on server */ + validate(fs, testFilePath, buffer, isAppendBlob); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java index 25a15679263b3..31f92d2bd3890 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java @@ -29,12 +29,15 @@ import org.junit.Test; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_LIST_MAX_RESULTS; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; @@ -55,7 +58,10 @@ public ITestAzureBlobFileSystemListStatus() throws Exception { @Test public void testListPath() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); + Configuration config = new Configuration(this.getRawConfiguration()); + config.set(AZURE_LIST_MAX_RESULTS, "5000"); + final AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem + .newInstance(getFileSystem().getUri(), config); final List> tasks = new ArrayList<>(); ExecutorService es = Executors.newFixedThreadPool(10); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index 382d3966485f1..de476a6abce9b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -18,12 +18,18 @@ package org.apache.hadoop.fs.azurebfs; +import java.util.UUID; + import org.junit.Test; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; + /** * Test mkdir operation. 
*/ @@ -45,4 +51,58 @@ public void testCreateDirWithExistingDir() throws Exception { public void testCreateRoot() throws Exception { assertMkdirs(getFileSystem(), new Path("/")); } + + /** + * Test mkdir for possible values of fs.azure.disable.default.create.overwrite + * @throws Exception + */ + @Test + public void testDefaultCreateOverwriteDirTest() throws Throwable { + // the config fs.azure.disable.default.create.overwrite should have no + // effect on mkdirs + testCreateDirOverwrite(true); + testCreateDirOverwrite(false); + } + + public void testCreateDirOverwrite(boolean enableConditionalCreateOverwrite) + throws Throwable { + final AzureBlobFileSystem currentFs = getFileSystem(); + Configuration config = new Configuration(this.getRawConfiguration()); + config.set("fs.azure.enable.conditional.create.overwrite", + Boolean.toString(enableConditionalCreateOverwrite)); + + final AzureBlobFileSystem fs = + (AzureBlobFileSystem) FileSystem.newInstance(currentFs.getUri(), + config); + + long totalConnectionMadeBeforeTest = fs.getInstrumentationMap() + .get(CONNECTIONS_MADE.getStatName()); + + int mkdirRequestCount = 0; + final Path dirPath = new Path("/DirPath_" + + UUID.randomUUID().toString()); + + // Case 1: Dir does not pre-exist + fs.mkdirs(dirPath); + + // One request to server + mkdirRequestCount++; + + assertAbfsStatistics( + CONNECTIONS_MADE, + totalConnectionMadeBeforeTest + mkdirRequestCount, + fs.getInstrumentationMap()); + + // Case 2: Dir pre-exists + // Mkdir on existing Dir path will not lead to failure + fs.mkdirs(dirPath); + + // One request to server + mkdirRequestCount++; + + assertAbfsStatistics( + CONNECTIONS_MADE, + totalConnectionMadeBeforeTest + mkdirRequestCount, + fs.getInstrumentationMap()); + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index 533f47125654e..e517f685784e7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -22,18 +22,21 @@ import java.io.InputStream; import java.util.Map; -import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; import org.junit.Assume; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode; import org.apache.hadoop.fs.azurebfs.services.AuthType; +import org.apache.hadoop.io.IOUtils; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET; @@ -52,6 +55,8 @@ public class ITestAzureBlobFileSystemOauth extends AbstractAbfsIntegrationTest{ private static final Path FILE_PATH = new Path("/testFile"); private static final Path EXISTED_FILE_PATH = new Path("/existedFile"); private static final Path EXISTED_FOLDER_PATH = new 
Path("/existedFolder"); + private static final Logger LOG = + LoggerFactory.getLogger(ITestAbfsStreamStatistics.class); public ITestAzureBlobFileSystemOauth() throws Exception { Assume.assumeTrue(this.getAuthType() == AuthType.OAuth); @@ -143,9 +148,11 @@ public void testBlobDataReader() throws Exception { // TEST WRITE FILE try { - abfsStore.openFileForWrite(EXISTED_FILE_PATH, true); + abfsStore.openFileForWrite(EXISTED_FILE_PATH, fs.getFsStatistics(), true); } catch (AbfsRestOperationException e) { assertEquals(AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH, e.getErrorCode()); + } finally { + IOUtils.cleanupWithLogger(LOG, abfsStore); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index e5f64b5f2c0a9..ef531acb2bbbc 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -21,12 +21,15 @@ import java.io.IOException; import java.util.Random; import java.util.concurrent.Callable; +import java.util.UUID; import org.junit.Assume; +import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSExceptionMessages; @@ -36,30 +39,43 @@ import org.apache.hadoop.fs.azure.NativeAzureFileSystem; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStream; +import org.apache.hadoop.fs.azurebfs.services.TestAbfsInputStream; + import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.BYTES_RECEIVED; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.GET_RESPONSES; +import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.ETAG; /** * Test random read operation. 
*/ public class ITestAzureBlobFileSystemRandomRead extends AbstractAbfsScaleTest { + private static final int BYTE = 1; + private static final int THREE_BYTES = 3; + private static final int FIVE_BYTES = 5; + private static final int TWENTY_BYTES = 20; + private static final int THIRTY_BYTES = 30; private static final int KILOBYTE = 1024; private static final int MEGABYTE = KILOBYTE * KILOBYTE; + private static final int FOUR_MB = 4 * MEGABYTE; + private static final int NINE_MB = 9 * MEGABYTE; private static final long TEST_FILE_SIZE = 8 * MEGABYTE; private static final int MAX_ELAPSEDTIMEMS = 20; private static final int SEQUENTIAL_READ_BUFFER_SIZE = 16 * KILOBYTE; - private static final int CREATE_BUFFER_SIZE = 26 * KILOBYTE; private static final int SEEK_POSITION_ONE = 2* KILOBYTE; private static final int SEEK_POSITION_TWO = 5 * KILOBYTE; private static final int SEEK_POSITION_THREE = 10 * KILOBYTE; private static final int SEEK_POSITION_FOUR = 4100 * KILOBYTE; - private static final Path TEST_FILE_PATH = new Path( - "/TestRandomRead.txt"); + private static final int ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE = 16 * MEGABYTE; + private static final int DISABLED_READAHEAD_DEPTH = 0; + + private static final String TEST_FILE_PREFIX = "/TestRandomRead"; private static final String WASB = "WASB"; private static final String ABFS = "ABFS"; - private static long testFileLength = 0; private static final Logger LOG = LoggerFactory.getLogger(ITestAzureBlobFileSystemRandomRead.class); @@ -70,9 +86,10 @@ public ITestAzureBlobFileSystemRandomRead() throws Exception { @Test public void testBasicRead() throws Exception { - assumeHugeFileExists(); + Path testPath = new Path(TEST_FILE_PREFIX + "_testBasicRead"); + assumeHugeFileExists(testPath); - try (FSDataInputStream inputStream = this.getFileSystem().open(TEST_FILE_PATH)) { + try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { byte[] buffer = new byte[3 * MEGABYTE]; // forward seek and read a kilobyte into first kilobyte of bufferV2 @@ -98,12 +115,14 @@ public void testBasicRead() throws Exception { public void testRandomRead() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", this.getFileSystem().getIsNamespaceEnabled()); - assumeHugeFileExists(); + Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomRead"); + assumeHugeFileExists(testPath); + try ( FSDataInputStream inputStreamV1 - = this.getFileSystem().open(TEST_FILE_PATH); + = this.getFileSystem().open(testPath); FSDataInputStream inputStreamV2 - = this.getWasbFileSystem().open(TEST_FILE_PATH); + = this.getWasbFileSystem().open(testPath); ) { final int bufferSize = 4 * KILOBYTE; byte[] bufferV1 = new byte[bufferSize]; @@ -155,8 +174,10 @@ public void testRandomRead() throws Exception { */ @Test public void testSeekToNewSource() throws Exception { - assumeHugeFileExists(); - try (FSDataInputStream inputStream = this.getFileSystem().open(TEST_FILE_PATH)) { + Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekToNewSource"); + assumeHugeFileExists(testPath); + + try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { assertFalse(inputStream.seekToNewSource(0)); } } @@ -168,8 +189,10 @@ public void testSeekToNewSource() throws Exception { */ @Test public void testSkipBounds() throws Exception { - assumeHugeFileExists(); - try (FSDataInputStream inputStream = this.getFileSystem().open(TEST_FILE_PATH)) { + Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipBounds"); + long testFileLength = 
assumeHugeFileExists(testPath); + + try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); long skipped = inputStream.skip(-1); @@ -207,8 +230,10 @@ public Long call() throws Exception { */ @Test public void testValidateSeekBounds() throws Exception { - assumeHugeFileExists(); - try (FSDataInputStream inputStream = this.getFileSystem().open(TEST_FILE_PATH)) { + Path testPath = new Path(TEST_FILE_PREFIX + "_testValidateSeekBounds"); + long testFileLength = assumeHugeFileExists(testPath); + + try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); inputStream.seek(0); @@ -256,8 +281,10 @@ public FSDataInputStream call() throws Exception { */ @Test public void testSeekAndAvailableAndPosition() throws Exception { - assumeHugeFileExists(); - try (FSDataInputStream inputStream = this.getFileSystem().open(TEST_FILE_PATH)) { + Path testPath = new Path(TEST_FILE_PREFIX + "_testSeekAndAvailableAndPosition"); + long testFileLength = assumeHugeFileExists(testPath); + + try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'}; byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'}; byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'}; @@ -320,8 +347,10 @@ public void testSeekAndAvailableAndPosition() throws Exception { */ @Test public void testSkipAndAvailableAndPosition() throws Exception { - assumeHugeFileExists(); - try (FSDataInputStream inputStream = this.getFileSystem().open(TEST_FILE_PATH)) { + Path testPath = new Path(TEST_FILE_PREFIX + "_testSkipAndAvailableAndPosition"); + long testFileLength = assumeHugeFileExists(testPath); + + try (FSDataInputStream inputStream = this.getFileSystem().open(testPath)) { byte[] expected1 = {(byte) 'a', (byte) 'b', (byte) 'c'}; byte[] expected2 = {(byte) 'd', (byte) 'e', (byte) 'f'}; byte[] expected3 = {(byte) 'b', (byte) 'c', (byte) 'd'}; @@ -384,15 +413,16 @@ public void testSkipAndAvailableAndPosition() throws Exception { @Test public void testSequentialReadAfterReverseSeekPerformance() throws Exception { - assumeHugeFileExists(); + Path testPath = new Path(TEST_FILE_PREFIX + "_testSequentialReadAfterReverseSeekPerformance"); + assumeHugeFileExists(testPath); final int maxAttempts = 10; final double maxAcceptableRatio = 1.01; double beforeSeekElapsedMs = 0, afterSeekElapsedMs = 0; double ratio = Double.MAX_VALUE; for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) { - beforeSeekElapsedMs = sequentialRead(ABFS, + beforeSeekElapsedMs = sequentialRead(ABFS, testPath, this.getFileSystem(), false); - afterSeekElapsedMs = sequentialRead(ABFS, + afterSeekElapsedMs = sequentialRead(ABFS, testPath, this.getFileSystem(), true); ratio = afterSeekElapsedMs / beforeSeekElapsedMs; LOG.info((String.format( @@ -412,11 +442,12 @@ public void testSequentialReadAfterReverseSeekPerformance() } @Test + @Ignore("HADOOP-16915") public void testRandomReadPerformance() throws Exception { Assume.assumeFalse("This test does not support namespace enabled account", this.getFileSystem().getIsNamespaceEnabled()); - createTestFile(); - assumeHugeFileExists(); + Path testPath = new Path(TEST_FILE_PREFIX + "_testRandomReadPerformance"); + assumeHugeFileExists(testPath); final AzureBlobFileSystem abFs = this.getFileSystem(); final NativeAzureFileSystem wasbFs = this.getWasbFileSystem(); @@ -426,8 +457,8 @@ public void 
testRandomReadPerformance() throws Exception { double v1ElapsedMs = 0, v2ElapsedMs = 0; double ratio = Double.MAX_VALUE; for (int i = 0; i < maxAttempts && ratio >= maxAcceptableRatio; i++) { - v1ElapsedMs = randomRead(1, wasbFs); - v2ElapsedMs = randomRead(2, abFs); + v1ElapsedMs = randomRead(1, testPath, wasbFs); + v2ElapsedMs = randomRead(2, testPath, abFs); ratio = v2ElapsedMs / v1ElapsedMs; @@ -446,15 +477,112 @@ public void testRandomReadPerformance() throws Exception { ratio < maxAcceptableRatio); } + /** + * With this test we should see a full buffer read being triggered in case + * alwaysReadBufferSize is on, else only the requested buffer size. + * Hence a seek done few bytes away from last read position will trigger + * a network read when alwaysReadBufferSize is off, whereas it will return + * from the internal buffer when it is on. + * Reading a full buffer size is the Gen1 behaviour. + * @throws Throwable + */ + @Test + public void testAlwaysReadBufferSizeConfig() throws Throwable { + testAlwaysReadBufferSizeConfig(false); + testAlwaysReadBufferSizeConfig(true); + } + + public void testAlwaysReadBufferSizeConfig(boolean alwaysReadBufferSizeConfigValue) + throws Throwable { + final AzureBlobFileSystem currentFs = getFileSystem(); + Configuration config = new Configuration(this.getRawConfiguration()); + config.set("fs.azure.readaheadqueue.depth", "0"); + config.set("fs.azure.read.alwaysReadBufferSize", + Boolean.toString(alwaysReadBufferSizeConfigValue)); + + final Path testFile = new Path("/FileName_" + + UUID.randomUUID().toString()); + + final AzureBlobFileSystem fs = createTestFile(testFile, 16 * MEGABYTE, + 1 * MEGABYTE, config); + String eTag = fs.getAbfsClient() + .getPathStatus(testFile.toUri().getPath(), false) + .getResult() + .getResponseHeader(ETAG); + + TestAbfsInputStream testInputStream = new TestAbfsInputStream(); + + AbfsInputStream inputStream = testInputStream.getAbfsInputStream( + fs.getAbfsClient(), + testFile.getName(), ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE, eTag, + DISABLED_READAHEAD_DEPTH, FOUR_MB, + alwaysReadBufferSizeConfigValue, FOUR_MB); + + long connectionsAtStart = fs.getInstrumentationMap() + .get(GET_RESPONSES.getStatName()); + + long dateSizeReadStatAtStart = fs.getInstrumentationMap() + .get(BYTES_RECEIVED.getStatName()); + + long newReqCount = 0; + long newDataSizeRead = 0; + + byte[] buffer20b = new byte[TWENTY_BYTES]; + byte[] buffer30b = new byte[THIRTY_BYTES]; + byte[] byteBuffer5 = new byte[FIVE_BYTES]; + + // first read + // if alwaysReadBufferSize is off, this is a sequential read + inputStream.read(byteBuffer5, 0, FIVE_BYTES); + newReqCount++; + newDataSizeRead += FOUR_MB; + + assertAbfsStatistics(GET_RESPONSES, connectionsAtStart + newReqCount, + fs.getInstrumentationMap()); + assertAbfsStatistics(BYTES_RECEIVED, + dateSizeReadStatAtStart + newDataSizeRead, fs.getInstrumentationMap()); + + // second read beyond that the buffer holds + // if alwaysReadBufferSize is off, this is a random read. 
Reads only + // incoming buffer size + // else, reads a buffer size + inputStream.seek(NINE_MB); + inputStream.read(buffer20b, 0, BYTE); + newReqCount++; + if (alwaysReadBufferSizeConfigValue) { + newDataSizeRead += FOUR_MB; + } else { + newDataSizeRead += TWENTY_BYTES; + } + + assertAbfsStatistics(GET_RESPONSES, connectionsAtStart + newReqCount, fs.getInstrumentationMap()); + assertAbfsStatistics(BYTES_RECEIVED, + dateSizeReadStatAtStart + newDataSizeRead, fs.getInstrumentationMap()); + + // third read adjacent to second but not exactly sequential. + // if alwaysReadBufferSize is off, this is another random read + // else second read would have read this too. + inputStream.seek(NINE_MB + TWENTY_BYTES + THREE_BYTES); + inputStream.read(buffer30b, 0, THREE_BYTES); + if (!alwaysReadBufferSizeConfigValue) { + newReqCount++; + newDataSizeRead += THIRTY_BYTES; + } + + assertAbfsStatistics(GET_RESPONSES, connectionsAtStart + newReqCount, fs.getInstrumentationMap()); + assertAbfsStatistics(BYTES_RECEIVED, dateSizeReadStatAtStart + newDataSizeRead, fs.getInstrumentationMap()); + } private long sequentialRead(String version, + Path testPath, FileSystem fs, boolean afterReverseSeek) throws IOException { byte[] buffer = new byte[SEQUENTIAL_READ_BUFFER_SIZE]; long totalBytesRead = 0; long bytesRead = 0; - try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) { + long testFileLength = fs.getFileStatus(testPath).getLen(); + try(FSDataInputStream inputStream = fs.open(testPath)) { if (afterReverseSeek) { while (bytesRead > 0 && totalBytesRead < 4 * MEGABYTE) { bytesRead = inputStream.read(buffer); @@ -485,14 +613,14 @@ private long sequentialRead(String version, } } - private long randomRead(int version, FileSystem fs) throws Exception { - assumeHugeFileExists(); + private long randomRead(int version, Path testPath, FileSystem fs) throws Exception { + assumeHugeFileExists(testPath); final long minBytesToRead = 2 * MEGABYTE; Random random = new Random(); byte[] buffer = new byte[8 * KILOBYTE]; long totalBytesRead = 0; long bytesRead = 0; - try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH)) { + try(FSDataInputStream inputStream = fs.open(testPath)) { ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); do { bytesRead = inputStream.read(buffer); @@ -524,28 +652,48 @@ private static double toMbps(long bytes, long milliseconds) { return bytes / 1000.0 * 8 / milliseconds; } - private void createTestFile() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - if (fs.exists(TEST_FILE_PATH)) { - FileStatus status = fs.getFileStatus(TEST_FILE_PATH); - if (status.getLen() >= TEST_FILE_SIZE) { - return; + private long createTestFile(Path testPath) throws Exception { + createTestFile(testPath, + TEST_FILE_SIZE, + MEGABYTE, + null); + + return TEST_FILE_SIZE; + } + + private AzureBlobFileSystem createTestFile(Path testFilePath, long testFileSize, + int createBufferSize, Configuration config) throws Exception { + AzureBlobFileSystem fs; + + if (config == null) { + config = this.getRawConfiguration(); + } + + final AzureBlobFileSystem currentFs = getFileSystem(); + fs = (AzureBlobFileSystem) FileSystem.newInstance(currentFs.getUri(), + config); + + if (fs.exists(testFilePath)) { + FileStatus status = fs.getFileStatus(testFilePath); + if (status.getLen() == testFileSize) { + return fs; } } - byte[] buffer = new byte[CREATE_BUFFER_SIZE]; + byte[] buffer = new byte[createBufferSize]; char character = 'a'; for (int i = 0; i < buffer.length; i++) { buffer[i] = (byte) 
character; character = (character == 'z') ? 'a' : (char) ((int) character + 1); } - LOG.info(String.format("Creating test file %s of size: %d ", TEST_FILE_PATH, TEST_FILE_SIZE)); + LOG.info(String.format("Creating test file %s of size: %d ", testFilePath, testFileSize)); ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); - try (FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) { + try (FSDataOutputStream outputStream = fs.create(testFilePath)) { + String bufferContents = new String(buffer); int bytesWritten = 0; - while (bytesWritten < TEST_FILE_SIZE) { + while (bytesWritten < testFileSize) { outputStream.write(buffer); bytesWritten += buffer.length; } @@ -555,18 +703,18 @@ private void createTestFile() throws Exception { outputStream.close(); closeTimer.end("time to close() output stream"); } - timer.end("time to write %d KB", TEST_FILE_SIZE / 1024); - testFileLength = fs.getFileStatus(TEST_FILE_PATH).getLen(); - + timer.end("time to write %d KB", testFileSize / 1024); + return fs; } - private void assumeHugeFileExists() throws Exception{ - createTestFile(); + private long assumeHugeFileExists(Path testPath) throws Exception{ + long fileSize = createTestFile(testPath); FileSystem fs = this.getFileSystem(); - ContractTestUtils.assertPathExists(this.getFileSystem(), "huge file not created", TEST_FILE_PATH); - FileStatus status = fs.getFileStatus(TEST_FILE_PATH); - ContractTestUtils.assertIsFile(TEST_FILE_PATH, status); - assertTrue("File " + TEST_FILE_PATH + " is empty", status.getLen() > 0); + ContractTestUtils.assertPathExists(this.getFileSystem(), "huge file not created", testPath); + FileStatus status = fs.getFileStatus(testPath); + ContractTestUtils.assertIsFile(testPath, status); + assertTrue("File " + testPath + " is not of expected size " + fileSize + ":actual=" + status.getLen(), status.getLen() == fileSize); + return fileSize; } private void verifyConsistentReads(FSDataInputStream inputStreamV1, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index e0e1d899a2184..2adf70ca6457d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.azurebfs; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.concurrent.Callable; @@ -25,23 +26,45 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.junit.Assert; +import org.assertj.core.api.Assertions; import org.junit.Test; +import org.junit.Assert; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; +import org.apache.hadoop.fs.azurebfs.services.TestAbfsClient; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.util.UUID.randomUUID; + +import static org.mockito.ArgumentMatchers.any; +import static 
org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + /** * Test rename operation. */ public class ITestAzureBlobFileSystemRename extends AbstractAbfsIntegrationTest { + private static final int REDUCED_RETRY_COUNT = 1; + private static final int REDUCED_MAX_BACKOFF_INTERVALS_MS = 5000; + public ITestAzureBlobFileSystemRename() throws Exception { super(); } @@ -59,6 +82,16 @@ public void testEnsureFileIsRenamed() throws Exception { assertPathDoesNotExist(fs, "expected renamed", src); } + @Test + public void testRenameWithPreExistingDestination() throws Exception { + final AzureBlobFileSystem fs = getFileSystem(); + Path src = path("renameSrc"); + touch(src); + Path dest = path("renameDest"); + touch(dest); + assertRenameOutcome(fs, src, dest, false); + } + @Test public void testRenameFileUnderDir() throws Exception { final AzureBlobFileSystem fs = getFileSystem(); @@ -149,4 +182,150 @@ public void testPosixRenameDirectory() throws Exception { assertTrue(fs.exists(new Path("testDir2/test4/test3"))); assertFalse(fs.exists(new Path("testDir2/test1/test2/test3"))); } + + @Test + public void testRenameRetryFailureAsHTTP400() throws Exception { + // Rename failed as Bad Request + // RenameIdempotencyCheck should throw back the rename failure Op + testRenameTimeout(HTTP_BAD_REQUEST, HTTP_BAD_REQUEST, false, + "renameIdempotencyCheckOp should return rename BadRequest " + + "response itself."); + } + + @Test + public void testRenameRetryFailureAsHTTP404() throws Exception { + // Rename failed as FileNotFound and the destination LMT is + // within TimespanForIdentifyingRecentOperationThroughLMT + testRenameTimeout(HTTP_NOT_FOUND, HTTP_OK, false, + "Rename should return success response because the destination " + + "path is present and its LMT is within " + + "TimespanForIdentifyingRecentOperationThroughLMT."); + } + + @Test + public void testRenameRetryFailureWithDestOldLMT() throws Exception { + // Rename failed as FileNotFound and the destination LMT is + // older than TimespanForIdentifyingRecentOperationThroughLMT + testRenameTimeout(HTTP_NOT_FOUND, HTTP_NOT_FOUND, true, + "Rename should return original rename failure response " + + "because the destination path LMT is older than " + + "TimespanForIdentifyingRecentOperationThroughLMT."); + } + + @Test + public void testRenameIdempotencyTriggerHttpNotFound() throws Exception { + AbfsHttpOperation http404Op = mock(AbfsHttpOperation.class); + when(http404Op.getStatusCode()).thenReturn(HTTP_NOT_FOUND); + + AbfsHttpOperation http200Op = mock(AbfsHttpOperation.class); + when(http200Op.getStatusCode()).thenReturn(HTTP_OK); + + // Check 1 where idempotency check fails to find dest path + // Rename should throw exception + testRenameIdempotencyTriggerChecks(http404Op); + + // Check 2 where idempotency check finds the dest path + // Renam will be successful + testRenameIdempotencyTriggerChecks(http200Op); + } + + private void testRenameIdempotencyTriggerChecks( + AbfsHttpOperation 
idempotencyRetHttpOp) throws Exception { + + final AzureBlobFileSystem fs = getFileSystem(); + AbfsClient client = TestAbfsClient.getMockAbfsClient( + fs.getAbfsStore().getClient(), + this.getConfiguration()); + + AbfsRestOperation idempotencyRetOp = mock(AbfsRestOperation.class); + when(idempotencyRetOp.getResult()).thenReturn(idempotencyRetHttpOp); + doReturn(idempotencyRetOp).when(client).renameIdempotencyCheckOp(any(), + any(), any()); + when(client.renamePath(any(), any(), any())).thenCallRealMethod(); + + // rename on non-existing source file will trigger idempotency check + if (idempotencyRetHttpOp.getStatusCode() == HTTP_OK) { + // idempotency check found that destination exists and is recently created + Assertions.assertThat(client.renamePath( + "/NonExistingsourcepath", + "/destpath", + null) + .getResult() + .getStatusCode()) + .describedAs("Idempotency check reports recent successful " + + "rename. 200OK should be returned") + .isEqualTo(idempotencyRetOp.getResult().getStatusCode()); + } else { + // rename dest not found. Original exception should be returned. + intercept(AbfsRestOperationException.class, + () -> client.renamePath( + "/NonExistingsourcepath", + "/destpath", + "")); + } + } + + private void testRenameTimeout( + int renameRequestStatus, + int renameIdempotencyCheckStatus, + boolean isOldOp, + String assertMessage) throws Exception { + // Config to reduce the retry and maxBackoff time for test run + AbfsConfiguration abfsConfig + = TestAbfsConfigurationFieldsValidation.updateRetryConfigs( + getConfiguration(), + REDUCED_RETRY_COUNT, REDUCED_MAX_BACKOFF_INTERVALS_MS); + + final AzureBlobFileSystem fs = getFileSystem(); + AbfsClient abfsClient = fs.getAbfsStore().getClient(); + AbfsClient testClient = TestAbfsClient.createTestClientFromCurrentContext( + abfsClient, + abfsConfig); + + // Mock instance of AbfsRestOperation + AbfsRestOperation op = mock(AbfsRestOperation.class); + // Set retryCount to non-zero + when(op.isARetriedRequest()).thenReturn(true); + + // Mock instance of Http Operation response. This will return HTTP:Bad Request + AbfsHttpOperation http400Op = mock(AbfsHttpOperation.class); + when(http400Op.getStatusCode()).thenReturn(HTTP_BAD_REQUEST); + + // Mock instance of Http Operation response. This will return HTTP:Not Found + AbfsHttpOperation http404Op = mock(AbfsHttpOperation.class); + when(http404Op.getStatusCode()).thenReturn(HTTP_NOT_FOUND); + + Path destinationPath = fs.makeQualified( + new Path("destination" + randomUUID().toString())); + + Instant renameRequestStartTime = Instant.now(); + + if (renameRequestStatus == HTTP_BAD_REQUEST) { + when(op.getResult()).thenReturn(http400Op); + } else if (renameRequestStatus == HTTP_NOT_FOUND) { + // Create the file new. 
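// ---------------------------------------------------------------------------
// The recency rule these HTTP_NOT_FOUND cases drive, sketched with the
// variables already in scope. This is an inference from the test cases above
// about what renameIdempotencyCheckOp verifies, not a copy of its
// implementation: a retried rename that failed with 404 is treated as
// already-successful only when the destination exists and its last-modified
// time falls after the request start time, less an allowance for clock skew.
Instant destinationLmt = Instant.ofEpochMilli(
    fs.getFileStatus(destinationPath).getModificationTime());
boolean treatRetriedRenameAsSuccess = destinationLmt.isAfter(
    renameRequestStartTime.minusMillis(DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS));
// When that flag is false the original 404 is surfaced, which is what
// testRenameRetryFailureWithDestOldLMT expects.
// ---------------------------------------------------------------------------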
+ fs.create(destinationPath); + when(op.getResult()).thenReturn(http404Op); + + if (isOldOp) { + // instead of sleeping for DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS + // which will affect test run time + // will modify renameRequestStartTime to a future time so that + // lmt will qualify for old op + renameRequestStartTime = renameRequestStartTime.plusSeconds( + DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS); + } + + } + + Assertions.assertThat(testClient.renameIdempotencyCheckOp( + renameRequestStartTime, + op, + destinationPath.toUri().getPath()) + .getResult() + .getStatusCode()) + .describedAs(assertMessage) + .isEqualTo(renameIdempotencyCheckStatus); + } + } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java index 397d1a6d9a19b..74cf02a4f1f68 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azurebfs; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.FileNotFoundException; import java.util.List; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java index 74c8803e4f6a8..29de126c4cc40 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java @@ -21,16 +21,29 @@ import java.io.IOException; import java.util.UUID; +import org.apache.hadoop.fs.azurebfs.enums.Trilean; import org.junit.Assume; import org.junit.Test; +import org.assertj.core.api.Assertions; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; -import org.apache.hadoop.fs.azurebfs.services.AuthType; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_FS_AZURE_ACCOUNT_IS_HNS_ENABLED; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION; -import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_IS_HNS_ENABLED; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; import static org.apache.hadoop.test.LambdaTestUtils.intercept; @@ -39,6 +52,9 @@ */ public class 
ITestGetNameSpaceEnabled extends AbstractAbfsIntegrationTest { + private static final String TRUE_STR = "true"; + private static final String FALSE_STR = "false"; + private boolean isUsingXNSAccount; public ITestGetNameSpaceEnabled() throws Exception { isUsingXNSAccount = getConfiguration().getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); @@ -57,7 +73,57 @@ public void testNonXNSAccount() throws IOException { Assume.assumeFalse("Skip this test because the account being used for test is a XNS account", isUsingXNSAccount); assertFalse("Expecting getIsNamespaceEnabled() return false", - getFileSystem().getIsNamespaceEnabled()); + getFileSystem().getIsNamespaceEnabled()); + } + + @Test + public void testGetIsNamespaceEnabledWhenConfigIsTrue() throws Exception { + AzureBlobFileSystem fs = getNewFSWithHnsConf(TRUE_STR); + Assertions.assertThat(fs.getIsNamespaceEnabled()).describedAs( + "getIsNamespaceEnabled should return true when the " + + "config is set as true").isTrue(); + fs.getAbfsStore().deleteFilesystem(); + unsetAndAssert(); + } + + @Test + public void testGetIsNamespaceEnabledWhenConfigIsFalse() throws Exception { + AzureBlobFileSystem fs = getNewFSWithHnsConf(FALSE_STR); + Assertions.assertThat(fs.getIsNamespaceEnabled()).describedAs( + "getIsNamespaceEnabled should return false when the " + + "config is set as false").isFalse(); + fs.getAbfsStore().deleteFilesystem(); + unsetAndAssert(); + } + + private void unsetAndAssert() throws Exception { + AzureBlobFileSystem fs = getNewFSWithHnsConf( + DEFAULT_FS_AZURE_ACCOUNT_IS_HNS_ENABLED); + boolean expectedValue = this.getConfiguration() + .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); + Assertions.assertThat(fs.getIsNamespaceEnabled()).describedAs( + "getIsNamespaceEnabled should return the value " + + "configured for fs.azure.test.namespace.enabled") + .isEqualTo(expectedValue); + fs.getAbfsStore().deleteFilesystem(); + } + + private AzureBlobFileSystem getNewFSWithHnsConf( + String isNamespaceEnabledAccount) throws Exception { + Configuration rawConfig = new Configuration(); + rawConfig.addResource(TEST_CONFIGURATION_FILE_NAME); + rawConfig.set(FS_AZURE_ACCOUNT_IS_HNS_ENABLED, isNamespaceEnabledAccount); + rawConfig + .setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true); + rawConfig.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, + getNonExistingUrl()); + return (AzureBlobFileSystem) FileSystem.get(rawConfig); + } + + private String getNonExistingUrl() { + String testUri = this.getTestUrl(); + return getAbfsScheme() + "://" + UUID.randomUUID() + testUri + .substring(testUri.indexOf("@")); } @Test @@ -77,22 +143,70 @@ public void testFailedRequestWhenFSNotExist() throws Exception { } @Test - public void testFailedRequestWhenCredentialsNotCorrect() throws Exception { - Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); - Configuration config = this.getRawConfiguration(); - config.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, false); - String accountName = this.getAccountName(); - String configkKey = FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME + "." + accountName; - // Provide a wrong sharedKey - String secret = config.get(configkKey); - secret = (char) (secret.charAt(0) + 1) + secret.substring(1); - config.set(configkKey, secret); - - AzureBlobFileSystem fs = this.getFileSystem(config); - intercept(AbfsRestOperationException.class, - "\"Server failed to authenticate the request. 
Make sure the value of Authorization header is formed correctly including the signature.\", 403", - ()-> { - fs.getIsNamespaceEnabled(); - }); + public void testEnsureGetAclCallIsMadeOnceWhenConfigIsInvalid() + throws Exception { + unsetConfAndEnsureGetAclCallIsMadeOnce(); + ensureGetAclCallIsMadeOnceForInvalidConf(" "); + unsetConfAndEnsureGetAclCallIsMadeOnce(); + ensureGetAclCallIsMadeOnceForInvalidConf("Invalid conf"); + unsetConfAndEnsureGetAclCallIsMadeOnce(); + } + + @Test + public void testEnsureGetAclCallIsNeverMadeWhenConfigIsValid() + throws Exception { + unsetConfAndEnsureGetAclCallIsMadeOnce(); + ensureGetAclCallIsNeverMadeForValidConf(FALSE_STR.toLowerCase()); + unsetConfAndEnsureGetAclCallIsMadeOnce(); + ensureGetAclCallIsNeverMadeForValidConf(FALSE_STR.toUpperCase()); + unsetConfAndEnsureGetAclCallIsMadeOnce(); + ensureGetAclCallIsNeverMadeForValidConf(TRUE_STR.toLowerCase()); + unsetConfAndEnsureGetAclCallIsMadeOnce(); + ensureGetAclCallIsNeverMadeForValidConf(TRUE_STR.toUpperCase()); + unsetConfAndEnsureGetAclCallIsMadeOnce(); } -} \ No newline at end of file + + @Test + public void testEnsureGetAclCallIsMadeOnceWhenConfigIsNotPresent() + throws IOException { + unsetConfAndEnsureGetAclCallIsMadeOnce(); + } + + private void ensureGetAclCallIsMadeOnceForInvalidConf(String invalidConf) + throws Exception { + this.getFileSystem().getAbfsStore() + .setNamespaceEnabled(Trilean.getTrilean(invalidConf)); + AbfsClient mockClient = + callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient(); + verify(mockClient, times(1)).getAclStatus(anyString()); + } + + private void ensureGetAclCallIsNeverMadeForValidConf(String validConf) + throws Exception { + this.getFileSystem().getAbfsStore() + .setNamespaceEnabled(Trilean.getTrilean(validConf)); + AbfsClient mockClient = + callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient(); + verify(mockClient, never()).getAclStatus(anyString()); + } + + private void unsetConfAndEnsureGetAclCallIsMadeOnce() throws IOException { + this.getFileSystem().getAbfsStore().setNamespaceEnabled(Trilean.UNKNOWN); + AbfsClient mockClient = + callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient(); + verify(mockClient, times(1)).getAclStatus(anyString()); + } + + private AbfsClient callAbfsGetIsNamespaceEnabledAndReturnMockAbfsClient() + throws IOException { + final AzureBlobFileSystem abfs = this.getFileSystem(); + final AzureBlobFileSystemStore abfsStore = abfs.getAbfsStore(); + final AbfsClient mockClient = mock(AbfsClient.class); + doReturn(mock(AbfsRestOperation.class)).when(mockClient) + .getAclStatus(anyString()); + abfsStore.setClient(mockClient); + abfs.getIsNamespaceEnabled(); + return mockClient; + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java new file mode 100644 index 0000000000000..ab55ffa3fe3c6 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs; + +import org.junit.Assume; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.services.AbfsClient; +import org.apache.hadoop.fs.azurebfs.services.AuthType; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +public class ITestSharedKeyAuth extends AbstractAbfsIntegrationTest { + + public ITestSharedKeyAuth() throws Exception { + super(); + } + + @Test + public void testWithWrongSharedKey() throws Exception { + Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); + Configuration config = this.getRawConfiguration(); + config.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, + false); + String accountName = this.getAccountName(); + String configkKey = FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME + "." + accountName; + // a wrong sharedKey + String secret = "XjUjsGherkDpljuyThd7RpljhR6uhsFjhlxRpmhgD12lnj7lhfRn8kgPt5" + + "+MJHS7UJNDER+jn6KP6Jnm2ONQlm=="; + config.set(configkKey, secret); + + AbfsClient abfsClient = this.getFileSystem(config).getAbfsClient(); + intercept(AbfsRestOperationException.class, + "\"Server failed to authenticate the request. Make sure the value of " + + "Authorization header is formed correctly including the " + + "signature.\", 403", + () -> { + abfsClient.getAclStatus("/"); + }); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java new file mode 100644 index 0000000000000..fce2b682f580a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java @@ -0,0 +1,523 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs; + +import java.util.Arrays; +import java.util.Random; +import java.util.UUID; +import java.util.Map; +import java.io.IOException; + +import org.assertj.core.api.Assertions; +import org.junit.Assume; +import org.junit.runners.Parameterized; +import org.junit.runner.RunWith; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.BYTES_SENT; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.SEND_REQUESTS; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_APPENDBLOB_ENABLED; + +/** + * Test combination for small writes with flush and close operations. + * This test class formulates an append test flow to assert on various scenarios. + * Test stages: + * 1. Pre-create test file of required size. This is determined by + * startingFileSize parameter. If it is 0, then pre-creation is skipped. + * + * 2. Formulate an append loop or iteration. An iteration, will do N writes + * (determined by numOfClientWrites parameter) with each writing X bytes + * (determined by recurringClientWriteSize parameter). + * + * 3. Determine total number of append iterations needed by a test. + * If intention is to close the outputStream right after append, setting + * directCloseTest parameter will determine 1 append test iteration with an + * ending close. + * Else, it will execute TEST_FLUSH_ITERATION number of test iterations, with + * each doing appends, hflush/hsync and then close. + * + * 4. Execute test iterations with asserts on number of store requests made and + * validating file content. + */ +@RunWith(Parameterized.class) +public class ITestSmallWriteOptimization extends AbstractAbfsScaleTest { + private static final int ONE_MB = 1024 * 1024; + private static final int TWO_MB = 2 * ONE_MB; + private static final int TEST_BUFFER_SIZE = TWO_MB; + private static final int HALF_TEST_BUFFER_SIZE = TWO_MB / 2; + private static final int QUARTER_TEST_BUFFER_SIZE = TWO_MB / 4; + private static final int TEST_FLUSH_ITERATION = 2; + + @Parameterized.Parameter + public String testScenario; + + @Parameterized.Parameter(1) + public boolean enableSmallWriteOptimization; + + /** + * If true, will initiate close after appends. (That is, no explicit hflush or + * hsync calls will be made from client app.) + */ + @Parameterized.Parameter(2) + public boolean directCloseTest; + + /** + * If non-zero, test file should be created as pre-requisite with this size. + */ + @Parameterized.Parameter(3) + public Integer startingFileSize; + + /** + * Determines the write sizes to be issued by client app. + */ + @Parameterized.Parameter(4) + public Integer recurringClientWriteSize; + + /** + * Determines the number of Client writes to make. + */ + @Parameterized.Parameter(5) + public Integer numOfClientWrites; + + /** + * True, if the small write optimization is supposed to be effective in + * the scenario. 
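To make the staged flow in the class comment above concrete, one test iteration boils down to the following shape. This is a hedged sketch using the parameter names of this class; buffer and cursor are illustrative locals rather than fields of the real test:

    FSDataOutputStream out = fs.create(testPath);          // or fs.append() when a file was pre-created
    for (int i = 0; i < numOfClientWrites; i++) {
      out.write(buffer, cursor, recurringClientWriteSize); // stage 2: N client writes of X bytes
      cursor += recurringClientWriteSize;
    }
    if (directCloseTest) {
      out.close();      // close-only path: anything still buffered is sent on close
    } else {
      out.hflush();     // flush path: a small pending buffer may be merged into the append
    }

The assertions then compare the observed CONNECTIONS_MADE, SEND_REQUESTS and BYTES_SENT counters against what this pattern should have produced.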
+ */ + @Parameterized.Parameter(6) + public boolean flushExpectedToBeMergedWithAppend; + + @Parameterized.Parameters(name = "{0}") + public static Iterable params() { + return Arrays.asList( + // Parameter Order : + // testScenario, + // enableSmallWriteOptimization, directCloseTest, startingFileSize, + // recurringClientWriteSize, numOfClientWrites, flushExpectedToBeMergedWithAppend + new Object[][]{ + // Buffer Size Write tests + { "OptmON_FlushCloseTest_EmptyFile_BufferSizeWrite", + true, false, 0, TEST_BUFFER_SIZE, 1, false + }, + { "OptmON_FlushCloseTest_NonEmptyFile_BufferSizeWrite", + true, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false + }, + { "OptmON_CloseTest_EmptyFile_BufferSizeWrite", + true, true, 0, TEST_BUFFER_SIZE, 1, false + }, + { "OptmON_CloseTest_NonEmptyFile_BufferSizeWrite", + true, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false + }, + { "OptmOFF_FlushCloseTest_EmptyFile_BufferSizeWrite", + false, false, 0, TEST_BUFFER_SIZE, 1, false + }, + { "OptmOFF_FlushCloseTest_NonEmptyFile_BufferSizeWrite", + false, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false + }, + { "OptmOFF_CloseTest_EmptyFile_BufferSizeWrite", + false, true, 0, TEST_BUFFER_SIZE, 1, false + }, + { "OptmOFF_CloseTest_NonEmptyFile_BufferSizeWrite", + false, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false + }, + // Less than buffer size write tests + { "OptmON_FlushCloseTest_EmptyFile_LessThanBufferSizeWrite", + true, false, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, true + }, + { "OptmON_FlushCloseTest_NonEmptyFile_LessThanBufferSizeWrite", + true, false, 2 * TEST_BUFFER_SIZE, + Math.abs(HALF_TEST_BUFFER_SIZE), 1, true + }, + { "OptmON_CloseTest_EmptyFile_LessThanBufferSizeWrite", + true, true, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, true + }, + { "OptmON_CloseTest_NonEmptyFile_LessThanBufferSizeWrite", + true, true, 2 * TEST_BUFFER_SIZE, + Math.abs(HALF_TEST_BUFFER_SIZE), 1, true + }, + { "OptmOFF_FlushCloseTest_EmptyFile_LessThanBufferSizeWrite", + false, false, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, false + }, + { "OptmOFF_FlushCloseTest_NonEmptyFile_LessThanBufferSizeWrite", + false, false, 2 * TEST_BUFFER_SIZE, + Math.abs(HALF_TEST_BUFFER_SIZE), 1, false + }, + { "OptmOFF_CloseTest_EmptyFile_LessThanBufferSizeWrite", + false, true, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, false + }, + { "OptmOFF_CloseTest_NonEmptyFile_LessThanBufferSizeWrite", + false, true, 2 * TEST_BUFFER_SIZE, + Math.abs(HALF_TEST_BUFFER_SIZE), 1, false + }, + // Multiple small writes still less than buffer size + { "OptmON_FlushCloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", + true, false, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true + }, + { "OptmON_FlushCloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", + true, false, 2 * TEST_BUFFER_SIZE, + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true + }, + { "OptmON_CloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", + true, true, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true + }, + { "OptmON_CloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", + true, true, 2 * TEST_BUFFER_SIZE, + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true + }, + { "OptmOFF_FlushCloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", + false, false, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false + }, + { "OptmOFF_FlushCloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", + false, false, 2 * TEST_BUFFER_SIZE, + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false + }, + { 
"OptmOFF_CloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", + false, true, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false + }, + { "OptmOFF_CloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", + false, true, 2 * TEST_BUFFER_SIZE, + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false + }, + // Multiple full buffer writes + { "OptmON_FlushCloseTest_EmptyFile_MultiBufferSizeWrite", + true, false, 0, TEST_BUFFER_SIZE, 3, false + }, + { "OptmON_FlushCloseTest_NonEmptyFile_MultiBufferSizeWrite", + true, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false + }, + { "OptmON_CloseTest_EmptyFile_MultiBufferSizeWrite", + true, true, 0, TEST_BUFFER_SIZE, 3, false + }, + { "OptmON_CloseTest_NonEmptyFile_MultiBufferSizeWrite", + true, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false + }, + { "OptmOFF_FlushCloseTest_EmptyFile_MultiBufferSizeWrite", + false, false, 0, TEST_BUFFER_SIZE, 3, false + }, + { "OptmOFF_FlushCloseTest_NonEmptyFile_MultiBufferSizeWrite", + false, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false + }, + { "OptmOFF_CloseTest_EmptyFile_MultiBufferSizeWrite", + false, true, 0, TEST_BUFFER_SIZE, 3, false + }, + { "OptmOFF_CloseTest_NonEmptyFile_MultiBufferSizeWrite", + false, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false + }, + // Multiple full buffers triggered and data less than buffer size pending + { "OptmON_FlushCloseTest_EmptyFile_BufferAndExtraWrite", + true, false, 0, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmON_FlushCloseTest_NonEmptyFile_BufferAndExtraWrite", + true, false, 2 * TEST_BUFFER_SIZE, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmON_CloseTest_EmptyFile__BufferAndExtraWrite", + true, true, 0, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmON_CloseTest_NonEmptyFile_BufferAndExtraWrite", + true, true, 2 * TEST_BUFFER_SIZE, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmOFF_FlushCloseTest_EmptyFile_BufferAndExtraWrite", + false, false, 0, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmOFF_FlushCloseTest_NonEmptyFile_BufferAndExtraWrite", + false, false, 2 * TEST_BUFFER_SIZE, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmOFF_CloseTest_EmptyFile_BufferAndExtraWrite", + false, true, 0, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + { "OptmOFF_CloseTest_NonEmptyFile_BufferAndExtraWrite", + false, true, 2 * TEST_BUFFER_SIZE, + TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), + 3, false + }, + // 0 byte tests + { "OptmON_FlushCloseTest_EmptyFile_0ByteWrite", + true, false, 0, 0, 1, false + }, + { "OptmON_FlushCloseTest_NonEmptyFile_0ByteWrite", + true, false, 2 * TEST_BUFFER_SIZE, 0, 1, false + }, + { "OptmON_CloseTest_EmptyFile_0ByteWrite", + true, true, 0, 0, 1, false + }, + { "OptmON_CloseTest_NonEmptyFile_0ByteWrite", + true, true, 2 * TEST_BUFFER_SIZE, 0, 1, false + }, + { "OptmOFF_FlushCloseTest_EmptyFile_0ByteWrite", + false, false, 0, 0, 1, false + }, + { "OptmOFF_FlushCloseTest_NonEmptyFile_0ByteWrite", + false, false, 2 * TEST_BUFFER_SIZE, 0, 1, false + }, + { "OptmOFF_CloseTest_EmptyFile_0ByteWrite", + false, true, 0, 0, 1, false + }, + { "OptmOFF_CloseTest_NonEmptyFile_0ByteWrite", + false, true, 2 * TEST_BUFFER_SIZE, 0, 1, false + }, + }); + } + public ITestSmallWriteOptimization() throws Exception { + super(); + } + + @Test + public void testSmallWriteOptimization() + throws 
IOException { + boolean serviceDefaultOptmSettings = DEFAULT_AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION; + // Tests with Optimization should only run if service has the feature on by + // default. Default settings will be turned on when server support is + // available on all store prod regions. + if (enableSmallWriteOptimization) { + Assume.assumeTrue(serviceDefaultOptmSettings); + } + + final AzureBlobFileSystem currentfs = this.getFileSystem(); + Configuration config = currentfs.getConf(); + boolean isAppendBlobTestSettingEnabled = (config.get(FS_AZURE_TEST_APPENDBLOB_ENABLED) == "true"); + + // This optimization doesnt take effect when append blob is on. + Assume.assumeFalse(isAppendBlobTestSettingEnabled); + + config.set(ConfigurationKeys.AZURE_WRITE_BUFFER_SIZE, Integer.toString(TEST_BUFFER_SIZE)); + config.set(ConfigurationKeys.AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION, Boolean.toString(enableSmallWriteOptimization)); + final AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.get( + currentfs.getUri(), config); + + formulateSmallWriteTestAppendPattern(fs, startingFileSize, + recurringClientWriteSize, numOfClientWrites, + directCloseTest, flushExpectedToBeMergedWithAppend); + } + + /** + * if isDirectCloseTest == true, append + close is triggered + * if isDirectCloseTest == false, append + flush runs are repeated over + * iterations followed by close + * @param fs + * @param startingFileSize + * @param recurringWriteSize + * @param numOfWrites + * @param isDirectCloseTest + * @throws IOException + */ + private void formulateSmallWriteTestAppendPattern(final AzureBlobFileSystem fs, + int startingFileSize, + int recurringWriteSize, + int numOfWrites, + boolean isDirectCloseTest, + boolean flushExpectedToBeMergedWithAppend) throws IOException { + + int totalDataToBeAppended = 0; + int testIteration = 0; + int dataWrittenPerIteration = (numOfWrites * recurringWriteSize); + + if (isDirectCloseTest) { + totalDataToBeAppended = dataWrittenPerIteration; + testIteration = 1; + } else { + testIteration = TEST_FLUSH_ITERATION; + totalDataToBeAppended = testIteration * dataWrittenPerIteration; + } + + int totalFileSize = totalDataToBeAppended + startingFileSize; + // write buffer of file size created. 
This will be used as write + // source and for file content validation + final byte[] writeBuffer = new byte[totalFileSize]; + new Random().nextBytes(writeBuffer); + int writeBufferCursor = 0; + + Path testPath = new Path(getMethodName() + UUID.randomUUID().toString()); + FSDataOutputStream opStream; + + if (startingFileSize > 0) { + writeBufferCursor += createFileWithStartingTestSize(fs, writeBuffer, writeBufferCursor, testPath, + startingFileSize); + opStream = fs.append(testPath); + } else { + opStream = fs.create(testPath); + } + + final int writeBufferSize = fs.getAbfsStore() + .getAbfsConfiguration() + .getWriteBufferSize(); + long expectedTotalRequestsMade = fs.getInstrumentationMap() + .get(CONNECTIONS_MADE.getStatName()); + long expectedRequestsMadeWithData = fs.getInstrumentationMap() + .get(SEND_REQUESTS.getStatName()); + long expectedBytesSent = fs.getInstrumentationMap() + .get(BYTES_SENT.getStatName()); + + while (testIteration > 0) { + // trigger recurringWriteSize appends over numOfWrites + writeBufferCursor += executeWritePattern(opStream, writeBuffer, + writeBufferCursor, numOfWrites, recurringWriteSize); + + int numOfBuffersWrittenToStore = (int) Math.floor( + dataWrittenPerIteration / writeBufferSize); + int dataSizeWrittenToStore = numOfBuffersWrittenToStore * writeBufferSize; + int pendingDataToStore = dataWrittenPerIteration - dataSizeWrittenToStore; + + expectedTotalRequestsMade += numOfBuffersWrittenToStore; + expectedRequestsMadeWithData += numOfBuffersWrittenToStore; + expectedBytesSent += dataSizeWrittenToStore; + + if (isDirectCloseTest) { + opStream.close(); + } else { + opStream.hflush(); + } + + boolean wasDataPendingToBeWrittenToServer = (pendingDataToStore > 0); + // Small write optimization will only work if + // a. config for small write optimization is on + // b. no buffer writes have been triggered since last flush + // c. there is some pending data in buffer to write to store + final boolean smallWriteOptimizationEnabled = fs.getAbfsStore() + .getAbfsConfiguration() + .isSmallWriteOptimizationEnabled(); + boolean flushWillBeMergedWithAppend = smallWriteOptimizationEnabled + && (numOfBuffersWrittenToStore == 0) + && (wasDataPendingToBeWrittenToServer); + + Assertions.assertThat(flushWillBeMergedWithAppend) + .describedAs(flushExpectedToBeMergedWithAppend + ? "Flush was to be merged with Append" + : "Flush should not have been merged with Append") + .isEqualTo(flushExpectedToBeMergedWithAppend); + + int totalAppendFlushCalls = (flushWillBeMergedWithAppend + ? 1 // 1 append (with flush and close param) + : (wasDataPendingToBeWrittenToServer) + ? 2 // 1 append + 1 flush (with close) + : 1); // 1 flush (with close) + + expectedTotalRequestsMade += totalAppendFlushCalls; + expectedRequestsMadeWithData += totalAppendFlushCalls; + expectedBytesSent += wasDataPendingToBeWrittenToServer + ? 
pendingDataToStore + : 0; + + assertOpStats(fs.getInstrumentationMap(), expectedTotalRequestsMade, + expectedRequestsMadeWithData, expectedBytesSent); + + if (isDirectCloseTest) { + // stream already closed + validateStoreAppends(fs, testPath, totalFileSize, writeBuffer); + return; + } + + testIteration--; + } + + opStream.close(); + expectedTotalRequestsMade += 1; + expectedRequestsMadeWithData += 1; + // no change in expectedBytesSent + assertOpStats(fs.getInstrumentationMap(), expectedTotalRequestsMade, expectedRequestsMadeWithData, expectedBytesSent); + + validateStoreAppends(fs, testPath, totalFileSize, writeBuffer); + } + + private int createFileWithStartingTestSize(AzureBlobFileSystem fs, byte[] writeBuffer, + int writeBufferCursor, Path testPath, int startingFileSize) + throws IOException { + FSDataOutputStream opStream = fs.create(testPath); + writeBufferCursor += executeWritePattern(opStream, + writeBuffer, + writeBufferCursor, + 1, + startingFileSize); + + opStream.close(); + Assertions.assertThat(fs.getFileStatus(testPath).getLen()) + .describedAs("File should be of size %d at the start of test.", + startingFileSize) + .isEqualTo(startingFileSize); + + return writeBufferCursor; + } + + private void validateStoreAppends(AzureBlobFileSystem fs, + Path testPath, + int totalFileSize, + byte[] bufferWritten) + throws IOException { + // Final validation + Assertions.assertThat(fs.getFileStatus(testPath).getLen()) + .describedAs("File should be of size %d at the end of test.", + totalFileSize) + .isEqualTo(totalFileSize); + + byte[] fileReadFromStore = new byte[totalFileSize]; + fs.open(testPath).read(fileReadFromStore, 0, totalFileSize); + + assertArrayEquals("Test file content incorrect", bufferWritten, + fileReadFromStore); + } + + private void assertOpStats(Map metricMap, + long expectedTotalRequestsMade, + long expectedRequestsMadeWithData, + long expectedBytesSent) { + assertAbfsStatistics(CONNECTIONS_MADE, expectedTotalRequestsMade, + metricMap); + assertAbfsStatistics(SEND_REQUESTS, expectedRequestsMadeWithData, + metricMap); + assertAbfsStatistics(BYTES_SENT, expectedBytesSent, metricMap); + } + + private int executeWritePattern(FSDataOutputStream opStream, + byte[] buffer, + int startOffset, + int writeLoopCount, + int writeSize) + throws IOException { + int dataSizeWritten = startOffset; + + while (writeLoopCount > 0) { + opStream.write(buffer, startOffset, writeSize); + startOffset += writeSize; + writeLoopCount--; + } + + dataSizeWritten = startOffset - dataSizeWritten; + return dataSizeWritten; + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java index 0f550d825e101..bda845bb45ad5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.StringConfigurationValidatorAnnotation; import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.LongConfigurationValidatorAnnotation; import org.apache.hadoop.fs.azurebfs.contracts.annotations.ConfigurationValidationAnnotations.Base64StringConfigurationValidatorAnnotation; -import 
org.apache.hadoop.fs.azurebfs.contracts.exceptions.ConfigurationPropertyNotFoundException; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException; import org.apache.hadoop.fs.azurebfs.utils.Base64; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SSL_CHANNEL_MODE_KEY; @@ -155,7 +155,7 @@ public void testGetAccountKey() throws Exception { assertEquals(this.encodedAccountKey, accountKey); } - @Test(expected = ConfigurationPropertyNotFoundException.class) + @Test(expected = KeyProviderException.class) public void testGetAccountKeyWithNonExistingAccountName() throws Exception { Configuration configuration = new Configuration(); configuration.addResource(TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME); @@ -182,4 +182,11 @@ public void testSSLSocketFactoryConfiguration() assertEquals(DelegatingSSLSocketFactory.SSLChannelMode.OpenSSL, localAbfsConfiguration.getPreferredSSLFactoryOption()); } -} \ No newline at end of file + public static AbfsConfiguration updateRetryConfigs(AbfsConfiguration abfsConfig, + int retryCount, + int backoffTime) { + abfsConfig.setMaxIoRetries(retryCount); + abfsConfig.setMaxBackoffIntervalMilliseconds(backoffTime); + return abfsConfig; + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsErrorTranslation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsErrorTranslation.java new file mode 100644 index 0000000000000..2c14b7af2821a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsErrorTranslation.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.FileNotFoundException; +import java.net.HttpURLConnection; +import java.nio.file.AccessDeniedException; + +import org.junit.Test; + +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; +import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode; +import org.apache.hadoop.test.AbstractHadoopTestBase; + +import static org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem.checkException; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.AUTHORIZATION_PERMISSION_MISS_MATCH; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.PATH_ALREADY_EXISTS; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.PATH_NOT_FOUND; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +/** + * Test suite to verify exception conversion, filtering etc. 
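As a reminder of what the translation verified below buys callers, the checkException() mapping means ordinary java.io/java.nio exception handling works against ABFS. A hedged usage sketch (the filesystem instance and path are illustrative; only the mapping itself is what the tests assert):

    try {
      fs.getFileStatus(new Path("/missing/file"));
    } catch (FileNotFoundException e) {
      // HTTP 404 / PATH_NOT_FOUND surfaces as the standard FileNotFoundException
    } catch (AccessDeniedException e) {
      // HTTP 403 / AUTHORIZATION_PERMISSION_MISS_MATCH surfaces as java.nio.file.AccessDeniedException
    }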
+ */ +public class TestAbfsErrorTranslation extends AbstractHadoopTestBase { + + public static final Path PATH = new Path("abfs//store/path"); + + @Test + public void testConvert403ToAccessDenied() throws Throwable { + assertTranslated(HttpURLConnection.HTTP_FORBIDDEN, + AUTHORIZATION_PERMISSION_MISS_MATCH, + AccessDeniedException.class, + AUTHORIZATION_PERMISSION_MISS_MATCH.getErrorCode()); + } + + @Test + public void testConvert404ToFNFE() throws Throwable { + assertTranslated(HttpURLConnection.HTTP_NOT_FOUND, + PATH_NOT_FOUND, + FileNotFoundException.class, + PATH_NOT_FOUND.getErrorCode()); + } + + @Test + public void testConvert409ToFileAlreadyExistsException() throws Throwable { + assertTranslated(HttpURLConnection.HTTP_CONFLICT, + PATH_ALREADY_EXISTS, + FileAlreadyExistsException.class, + PATH_ALREADY_EXISTS.getErrorCode()); + } + + /** + * Assert that for a given status code and AzureServiceErrorCode, a specific + * exception class is raised. + * @param type of exception + * @param httpStatus http status code + * @param exitCode AzureServiceErrorCode + * @param clazz class of raised exception + * @param expectedText text to expect in the exception + * @throws Exception any other exception than the one expected + */ + private void assertTranslated( + int httpStatus, AzureServiceErrorCode exitCode, + Class clazz, String expectedText) throws Exception { + AbfsRestOperationException ex = + new AbfsRestOperationException(httpStatus, exitCode.getErrorCode(), + "", null); + intercept(clazz, expectedText, () -> { + checkException(PATH, ex); + return "expected exception translation from " + ex; + }); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java new file mode 100644 index 0000000000000..22c247f98af63 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; + +public class TestAbfsInputStreamStatistics extends AbstractAbfsIntegrationTest { + + private static final int OPERATIONS = 100; + + public TestAbfsInputStreamStatistics() throws Exception { + } + + /** + * Test to check the bytesReadFromBuffer statistic value from AbfsInputStream. 
+ */ + @Test + public void testBytesReadFromBufferStatistic() { + describe("Testing bytesReadFromBuffer statistics value in AbfsInputStream"); + + AbfsInputStreamStatisticsImpl abfsInputStreamStatistics = + new AbfsInputStreamStatisticsImpl(); + + //Increment the bytesReadFromBuffer value. + for (int i = 0; i < OPERATIONS; i++) { + abfsInputStreamStatistics.bytesReadFromBuffer(1); + } + + /* + * Since we incremented the bytesReadFromBuffer OPERATIONS times, this + * should be the expected value. + */ + assertEquals("Mismatch in bytesReadFromBuffer value", OPERATIONS, + abfsInputStreamStatistics.getBytesReadFromBuffer()); + + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java new file mode 100644 index 0000000000000..0639cf2f82b9a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; + +public class TestAbfsNetworkStatistics extends AbstractAbfsIntegrationTest { + + private static final int LARGE_OPERATIONS = 1000; + + public TestAbfsNetworkStatistics() throws Exception { + } + + /** + * Test to check correct values of read and write throttling statistics in + * {@code AbfsClientThrottlingAnalyzer}. + */ + @Test + public void testAbfsThrottlingStatistics() throws IOException { + describe("Test to check correct values of read throttle and write " + + "throttle statistics in Abfs"); + + AbfsCounters statistics = + new AbfsCountersImpl(getFileSystem().getUri()); + + /* + * Calling the throttle methods to check correct summation and values of + * the counters. + */ + for (int i = 0; i < LARGE_OPERATIONS; i++) { + statistics.incrementCounter(AbfsStatistic.READ_THROTTLES, 1); + statistics.incrementCounter(AbfsStatistic.WRITE_THROTTLES, 1); + } + + Map metricMap = statistics.toMap(); + + /* + * Test to check read and write throttle statistics gave correct values for + * 1000 calls. 
+ */ + assertAbfsStatistics(AbfsStatistic.READ_THROTTLES, LARGE_OPERATIONS, + metricMap); + assertAbfsStatistics(AbfsStatistic.WRITE_THROTTLES, LARGE_OPERATIONS, + metricMap); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java new file mode 100644 index 0000000000000..5f9404302bd2c --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.util.Random; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; +import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl; + +/** + * Unit tests for AbfsOutputStream statistics. + */ +public class TestAbfsOutputStreamStatistics + extends AbstractAbfsIntegrationTest { + + private static final int LOW_RANGE_FOR_RANDOM_VALUE = 49; + private static final int HIGH_RANGE_FOR_RANDOM_VALUE = 9999; + private static final int OPERATIONS = 10; + + public TestAbfsOutputStreamStatistics() throws Exception { + } + + /** + * Tests to check number of bytes failed to upload in + * {@link AbfsOutputStream}. + */ + @Test + public void testAbfsOutputStreamBytesFailed() { + describe("Testing number of bytes failed during upload in AbfsOutputSteam"); + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + new AbfsOutputStreamStatisticsImpl(); + + //Test for zero bytes uploaded. + assertEquals("Mismatch in number of bytes failed to upload", 0, + abfsOutputStreamStatistics.getBytesUploadFailed()); + + //Populating small random value for bytesFailed. + int randomBytesFailed = new Random().nextInt(LOW_RANGE_FOR_RANDOM_VALUE); + abfsOutputStreamStatistics.uploadFailed(randomBytesFailed); + //Test for bytes failed to upload. + assertEquals("Mismatch in number of bytes failed to upload", + randomBytesFailed, abfsOutputStreamStatistics.getBytesUploadFailed()); + + //Reset statistics for the next test. + abfsOutputStreamStatistics = new AbfsOutputStreamStatisticsImpl(); + + /* + * Entering multiple random values for bytesFailed to check correct + * summation of values. + */ + int expectedBytesFailed = 0; + for (int i = 0; i < OPERATIONS; i++) { + randomBytesFailed = new Random().nextInt(HIGH_RANGE_FOR_RANDOM_VALUE); + abfsOutputStreamStatistics.uploadFailed(randomBytesFailed); + expectedBytesFailed += randomBytesFailed; + } + //Test for bytes failed to upload. 
+ assertEquals("Mismatch in number of bytes failed to upload", + expectedBytesFailed, abfsOutputStreamStatistics.getBytesUploadFailed()); + } + + /** + * Tests to check time spent on waiting for tasks to be complete on a + * blocking queue in {@link AbfsOutputStream}. + */ + @Test + public void testAbfsOutputStreamTimeSpentOnWaitTask() { + describe("Testing time Spent on waiting for task to be completed in " + + "AbfsOutputStream"); + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + new AbfsOutputStreamStatisticsImpl(); + + //Test for initial value of timeSpentWaitTask. + assertEquals("Mismatch in time spent on waiting for tasks to complete", 0, + abfsOutputStreamStatistics.getTimeSpentOnTaskWait()); + + abfsOutputStreamStatistics + .timeSpentTaskWait(); + //Test for one op call value of timeSpentWaitTask. + assertEquals("Mismatch in time spent on waiting for tasks to complete", + 1, abfsOutputStreamStatistics.getTimeSpentOnTaskWait()); + + //Reset statistics for the next test. + abfsOutputStreamStatistics = new AbfsOutputStreamStatisticsImpl(); + + /* + * Entering multiple values for timeSpentTaskWait() to check the + * summation is happening correctly. Also calculating the expected result. + */ + for (int i = 0; i < OPERATIONS; i++) { + abfsOutputStreamStatistics.timeSpentTaskWait(); + } + + /* + * Test to check correct value of timeSpentTaskWait after OPERATIONS + * number of op calls. + */ + assertEquals("Mismatch in time spent on waiting for tasks to complete", + OPERATIONS, + abfsOutputStreamStatistics.getTimeSpentOnTaskWait()); + } + + /** + * Unit Tests to check correct values of queue shrunk operations in + * AbfsOutputStream. + * + */ + @Test + public void testAbfsOutputStreamQueueShrink() { + describe("Testing queue shrink operations by AbfsOutputStream"); + + AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = + new AbfsOutputStreamStatisticsImpl(); + + //Test for shrinking queue zero time. + assertEquals("Mismatch in queue shrunk operations", 0, + abfsOutputStreamStatistics.getQueueShrunkOps()); + + abfsOutputStreamStatistics.queueShrunk(); + + //Test for shrinking queue 1 time. + assertEquals("Mismatch in queue shrunk operations", 1, + abfsOutputStreamStatistics.getQueueShrunkOps()); + + //Reset statistics for the next test. + abfsOutputStreamStatistics = new AbfsOutputStreamStatisticsImpl(); + + /* + * Entering random values for queueShrunkOps and checking the correctness + * of summation for the statistic. + */ + int randomQueueValues = new Random().nextInt(HIGH_RANGE_FOR_RANDOM_VALUE); + for (int i = 0; i < randomQueueValues * OPERATIONS; i++) { + abfsOutputStreamStatistics.queueShrunk(); + } + /* + * Test for random times incrementing queue shrunk operations. + */ + assertEquals("Mismatch in queue shrunk operations", + randomQueueValues * OPERATIONS, + abfsOutputStreamStatistics.getQueueShrunkOps()); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java new file mode 100644 index 0000000000000..f831d2d4cd26b --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import java.io.IOException; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; + +/** + * Unit tests for Abfs common counters. + */ +public class TestAbfsStatistics extends AbstractAbfsIntegrationTest { + + private static final int LARGE_OPS = 100; + + public TestAbfsStatistics() throws Exception { + } + + /** + * Tests for op_get_delegation_token and error_ignore counter values. + */ + @Test + public void testInitializeStats() throws IOException { + describe("Testing the counter values after Abfs is initialised"); + + AbfsCounters instrumentation = + new AbfsCountersImpl(getFileSystem().getUri()); + + //Testing summation of the counter values. + for (int i = 0; i < LARGE_OPS; i++) { + instrumentation.incrementCounter(AbfsStatistic.CALL_GET_DELEGATION_TOKEN, 1); + instrumentation.incrementCounter(AbfsStatistic.ERROR_IGNORED, 1); + } + + Map metricMap = instrumentation.toMap(); + + assertAbfsStatistics(AbfsStatistic.CALL_GET_DELEGATION_TOKEN, LARGE_OPS, + metricMap); + assertAbfsStatistics(AbfsStatistic.ERROR_IGNORED, LARGE_OPS, metricMap); + + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java index a790cf214872b..4cb0961e9364a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java @@ -20,13 +20,24 @@ import java.io.IOException; +import org.assertj.core.api.Assertions; +import org.junit.Test; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException; +import org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider; +import org.apache.hadoop.fs.azurebfs.oauth2.CustomTokenProviderAdapter; +import org.apache.hadoop.fs.azurebfs.services.AuthType; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import org.junit.Test; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; /** * Tests correct precedence of various configurations that might be 
returned. @@ -40,6 +51,14 @@ * that do allow default values (all others) follow another form. */ public class TestAccountConfiguration { + private static final String TEST_OAUTH_PROVIDER_CLASS_CONFIG = "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider"; + private static final String TEST_CUSTOM_PROVIDER_CLASS_CONFIG = "org.apache.hadoop.fs.azurebfs.oauth2.RetryTestTokenProvider"; + private static final String TEST_SAS_PROVIDER_CLASS_CONFIG_1 = "org.apache.hadoop.fs.azurebfs.extensions.MockErrorSASTokenProvider"; + private static final String TEST_SAS_PROVIDER_CLASS_CONFIG_2 = "org.apache.hadoop.fs.azurebfs.extensions.MockSASTokenProvider"; + + private static final String TEST_OAUTH_ENDPOINT = "oauthEndpoint"; + private static final String TEST_CLIENT_ID = "clientId"; + private static final String TEST_CLIENT_SECRET = "clientSecret"; @Test public void testStringPrecedence() @@ -248,7 +267,7 @@ private class GetClassImpl1 implements GetClassInterface { } @Test - public void testClassPrecedence() + public void testClass() throws IllegalAccessException, IOException, InvalidConfigurationValueException { final String accountName = "account"; @@ -264,22 +283,182 @@ public void testClassPrecedence() conf.setClass(globalKey, class0, xface); assertEquals("Default value returned even though account-agnostic config was set", - abfsConf.getClass(globalKey, class1, xface), class0); + abfsConf.getAccountAgnosticClass(globalKey, class1, xface), class0); conf.unset(globalKey); assertEquals("Default value not returned even though config was unset", - abfsConf.getClass(globalKey, class1, xface), class1); + abfsConf.getAccountAgnosticClass(globalKey, class1, xface), class1); conf.setClass(accountKey, class0, xface); assertEquals("Default value returned even though account-specific config was set", - abfsConf.getClass(globalKey, class1, xface), class0); + abfsConf.getAccountSpecificClass(globalKey, class1, xface), class0); conf.unset(accountKey); assertEquals("Default value not returned even though config was unset", - abfsConf.getClass(globalKey, class1, xface), class1); + abfsConf.getAccountSpecificClass(globalKey, class1, xface), class1); conf.setClass(accountKey, class1, xface); conf.setClass(globalKey, class0, xface); assertEquals("Account-agnostic or default value returned even though account-specific config was set", - abfsConf.getClass(globalKey, class0, xface), class1); + abfsConf.getAccountSpecificClass(globalKey, class0, xface), class1); + } + + @Test + public void testSASProviderPrecedence() + throws IOException, IllegalAccessException { + final String accountName = "account"; + + final Configuration conf = new Configuration(); + final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); + + // AccountSpecific: SAS with provider set as SAS_Provider_1 + abfsConf.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME + "." + accountName, + "SAS"); + abfsConf.set(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE + "." 
+ accountName, + TEST_SAS_PROVIDER_CLASS_CONFIG_1); + + // Global: SAS with provider set as SAS_Provider_2 + abfsConf.set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, + AuthType.SAS.toString()); + abfsConf.set(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE, + TEST_SAS_PROVIDER_CLASS_CONFIG_2); + + Assertions.assertThat( + abfsConf.getSASTokenProvider().getClass().getName()) + .describedAs( + "Account-specific SAS token provider should be in effect.") + .isEqualTo(TEST_SAS_PROVIDER_CLASS_CONFIG_1); + } + + @Test + public void testAccessTokenProviderPrecedence() + throws IllegalAccessException, IOException { + final String accountName = "account"; + + final Configuration conf = new Configuration(); + final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); + + // Global: Custom , AccountSpecific: OAuth + testGlobalAndAccountOAuthPrecedence(abfsConf, AuthType.Custom, + AuthType.OAuth); + + // Global: OAuth , AccountSpecific: Custom + testGlobalAndAccountOAuthPrecedence(abfsConf, AuthType.OAuth, + AuthType.Custom); + + // Global: (non-oAuth) SAS , AccountSpecific: Custom + testGlobalAndAccountOAuthPrecedence(abfsConf, AuthType.SAS, + AuthType.Custom); + + // Global: Custom , AccountSpecific: - + testGlobalAndAccountOAuthPrecedence(abfsConf, AuthType.Custom, null); + + // Global: OAuth , AccountSpecific: - + testGlobalAndAccountOAuthPrecedence(abfsConf, AuthType.OAuth, null); + + // Global: - , AccountSpecific: Custom + testGlobalAndAccountOAuthPrecedence(abfsConf, null, AuthType.Custom); + + // Global: - , AccountSpecific: OAuth + testGlobalAndAccountOAuthPrecedence(abfsConf, null, AuthType.OAuth); + } + + public void testGlobalAndAccountOAuthPrecedence(AbfsConfiguration abfsConf, + AuthType globalAuthType, + AuthType accountSpecificAuthType) + throws IOException { + if (globalAuthType == null) { + unsetAuthConfig(abfsConf, false); + } else { + setAuthConfig(abfsConf, false, globalAuthType); + } + + if (accountSpecificAuthType == null) { + unsetAuthConfig(abfsConf, true); + } else { + setAuthConfig(abfsConf, true, accountSpecificAuthType); + } + + // If account specific AuthType is present, precedence is always for it. + AuthType expectedEffectiveAuthType; + if (accountSpecificAuthType != null) { + expectedEffectiveAuthType = accountSpecificAuthType; + } else { + expectedEffectiveAuthType = globalAuthType; + } + + Class expectedEffectiveTokenProviderClassType = + (expectedEffectiveAuthType == AuthType.OAuth) + ? ClientCredsTokenProvider.class + : CustomTokenProviderAdapter.class; + + Assertions.assertThat( + abfsConf.getTokenProvider().getClass().getTypeName()) + .describedAs( + "Account-specific settings takes precendence to global" + + " settings. In absence of Account settings, global settings " + + "should take effect.") + .isEqualTo(expectedEffectiveTokenProviderClassType.getTypeName()); + + + unsetAuthConfig(abfsConf, false); + unsetAuthConfig(abfsConf, true); + } + + public void setAuthConfig(AbfsConfiguration abfsConf, + boolean isAccountSetting, + AuthType authType) { + final String accountNameSuffix = "." + abfsConf.getAccountName(); + String authKey = FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME + + (isAccountSetting ? accountNameSuffix : ""); + String providerClassKey = ""; + String providerClassValue = ""; + + switch (authType) { + case OAuth: + providerClassKey = FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME + + (isAccountSetting ? 
accountNameSuffix : ""); + providerClassValue = TEST_OAUTH_PROVIDER_CLASS_CONFIG; + + abfsConf.set(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT + + ((isAccountSetting) ? accountNameSuffix : ""), + TEST_OAUTH_ENDPOINT); + abfsConf.set(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID + + ((isAccountSetting) ? accountNameSuffix : ""), + TEST_CLIENT_ID); + abfsConf.set(FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET + + ((isAccountSetting) ? accountNameSuffix : ""), + TEST_CLIENT_SECRET); + break; + + case Custom: + providerClassKey = FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME + + (isAccountSetting ? accountNameSuffix : ""); + providerClassValue = TEST_CUSTOM_PROVIDER_CLASS_CONFIG; + break; + + case SAS: + providerClassKey = FS_AZURE_SAS_TOKEN_PROVIDER_TYPE + + (isAccountSetting ? accountNameSuffix : ""); + providerClassValue = TEST_SAS_PROVIDER_CLASS_CONFIG_1; + break; + + default: // set nothing + } + + abfsConf.set(authKey, authType.toString()); + abfsConf.set(providerClassKey, providerClassValue); + } + + private void unsetAuthConfig(AbfsConfiguration abfsConf, boolean isAccountSettings) { + String accountNameSuffix = + isAccountSettings ? ("." + abfsConf.getAccountName()) : ""; + + abfsConf.unset(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME + accountNameSuffix); + abfsConf.unset(FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME + accountNameSuffix); + abfsConf.unset(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE + accountNameSuffix); + + abfsConf.unset(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT + accountNameSuffix); + abfsConf.unset(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID + accountNameSuffix); + abfsConf.unset(FS_AZURE_ACCOUNT_OAUTH_CLIENT_SECRET + accountNameSuffix); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java new file mode 100644 index 0000000000000..45467d4140132 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException; +import org.apache.hadoop.fs.azurebfs.enums.Trilean; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; + +/** + * Tests for the enum Trilean. + */ +public class TrileanTests { + + private static final String TRUE_STR = "true"; + private static final String FALSE_STR = "false"; + + @Test + public void testGetTrileanForBoolean() { + assertThat(Trilean.getTrilean(true)).describedAs( + "getTrilean should return Trilean.TRUE when true is passed") + .isEqualTo(Trilean.TRUE); + assertThat(Trilean.getTrilean(false)).describedAs( + "getTrilean should return Trilean.FALSE when false is passed") + .isEqualTo(Trilean.FALSE); + } + + @Test + public void testGetTrileanForString() { + assertThat(Trilean.getTrilean(TRUE_STR.toLowerCase())).describedAs( + "getTrilean should return Trilean.TRUE when true is passed") + .isEqualTo(Trilean.TRUE); + assertThat(Trilean.getTrilean(TRUE_STR.toUpperCase())).describedAs( + "getTrilean should return Trilean.TRUE when TRUE is passed") + .isEqualTo(Trilean.TRUE); + + assertThat(Trilean.getTrilean(FALSE_STR.toLowerCase())).describedAs( + "getTrilean should return Trilean.FALSE when false is passed") + .isEqualTo(Trilean.FALSE); + assertThat(Trilean.getTrilean(FALSE_STR.toUpperCase())).describedAs( + "getTrilean should return Trilean.FALSE when FALSE is passed") + .isEqualTo(Trilean.FALSE); + + testInvalidString(null); + testInvalidString(" "); + testInvalidString("invalid"); + testInvalidString("truee"); + testInvalidString("falsee"); + } + + private void testInvalidString(String invalidString) { + assertThat(Trilean.getTrilean(invalidString)).describedAs( + "getTrilean should return Trilean.UNKNOWN for anything not true/false") + .isEqualTo(Trilean.UNKNOWN); + } + + @Test + public void testToBoolean() throws TrileanConversionException { + assertThat(Trilean.TRUE.toBoolean()) + .describedAs("toBoolean should return true for Trilean.TRUE").isTrue(); + assertThat(Trilean.FALSE.toBoolean()) + .describedAs("toBoolean should return false for Trilean.FALSE") + .isFalse(); + + assertThat(catchThrowable(() -> Trilean.UNKNOWN.toBoolean())).describedAs( + "toBoolean on Trilean.UNKNOWN results in TrileanConversionException") + .isInstanceOf(TrileanConversionException.class).describedAs( + "Exception message should be: catchThrowable(()->Trilean.UNKNOWN" + + ".toBoolean())") + .hasMessage("Cannot convert Trilean.UNKNOWN to boolean"); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java index c8dcef3ef205e..72ea7661b5a90 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/constants/TestConfigurationKeys.java @@ -27,6 +27,7 @@ public final class TestConfigurationKeys { public static final String 
FS_AZURE_ACCOUNT_KEY = "fs.azure.account.key"; public static final String FS_AZURE_CONTRACT_TEST_URI = "fs.contract.test.fs.abfs"; public static final String FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT = "fs.azure.test.namespace.enabled"; + public static final String FS_AZURE_TEST_APPENDBLOB_ENABLED = "fs.azure.test.appendblob.enabled"; public static final String FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_ID = "fs.azure.account.oauth2.contributor.client.id"; public static final String FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET = "fs.azure.account.oauth2.contributor.client.secret"; @@ -45,6 +46,14 @@ public final class TestConfigurationKeys { public static final String MOCK_SASTOKENPROVIDER_FAIL_INIT = "mock.sastokenprovider.fail.init"; public static final String MOCK_SASTOKENPROVIDER_RETURN_EMPTY_SAS_TOKEN = "mock.sastokenprovider.return.empty.sasToken"; + public static final String FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID = "fs.azure.test.app.service.principal.tenant.id"; + + public static final String FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID = "fs.azure.test.app.service.principal.object.id"; + + public static final String FS_AZURE_TEST_APP_ID = "fs.azure.test.app.id"; + + public static final String FS_AZURE_TEST_APP_SECRET = "fs.azure.test.app.secret"; + public static final String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml"; public static final String TEST_CONTAINER_PREFIX = "abfs-testcontainer-"; public static final int TEST_TIMEOUT = 15 * 60 * 1000; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java index b92bef68a0908..cd60e6d5ae498 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java @@ -38,6 +38,11 @@ public ITestAbfsFileSystemContractRename() throws Exception { public void setup() throws Exception { binding.setup(); super.setup(); + // Base rename contract test class re-uses the test folder + // This leads to failures when the test is re-run as same ABFS test + // containers are re-used for test run and creation of source and + // destination test paths fail, as they are already present. + binding.getFileSystem().delete(binding.getTestPath(), true); } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java new file mode 100644 index 0000000000000..8a33ea5de0641 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ListResultSchemaTest.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.contract; + +import java.io.IOException; + +import org.codehaus.jackson.map.ObjectMapper; +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema; +import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests the JSON parsing for the listfilestatus response to ListResultSchema + */ +public class ListResultSchemaTest { + + /** + * Test parsing a JSON which matches the properties in the ListResultSchema + * and ListResultEntrySchema + *
    + * { + * "paths": [ + * { + * "contentLength": "0", + * "etag": "0x8D8186452785ADA", + * "group": "$superuser", + * "lastModified": "Wed, 24 Jun 2020 17:30:43 GMT", + * "name": "dest/filename", + * "owner": "$superuser", + * "permissions": "rw-r--r--" + * } + * ] + * } + */ + @Test + public void testMatchingJSON() throws IOException { + + String matchingJson = + "{ \"paths\": [ { \"contentLength\": \"0\", \"etag\": " + + "\"0x8D8186452785ADA\", \"group\": \"$superuser\", " + + "\"lastModified\": \"Wed, 24 Jun 2020 17:30:43 GMT\", \"name\": " + + "\"dest/filename\", \"owner\": \"$superuser\", \"permissions\": " + + "\"rw-r--r--\" } ] } "; + + final ObjectMapper objectMapper = new ObjectMapper(); + final ListResultSchema listResultSchema = objectMapper + .readValue(matchingJson, ListResultSchema.class); + + assertThat(listResultSchema.paths().size()) + .describedAs("Only one path is expected as present in the input JSON") + .isEqualTo(1); + + ListResultEntrySchema path = listResultSchema.paths().get(0); + assertThat(path.contentLength()) + .describedAs("contentLength should match the value in the input JSON") + .isEqualTo(0L); + assertThat(path.eTag()) + .describedAs("eTag should match the value in the input JSON") + .isEqualTo("0x8D8186452785ADA"); + assertThat(path.group()) + .describedAs("group should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.lastModified()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("Wed, 24 Jun 2020 17:30:43 GMT"); + assertThat(path.name()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("dest/filename"); + assertThat(path.owner()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.permissions()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("rw-r--r--"); + } + + /** + * Test parsing a JSON which matches the properties in the ListResultSchema + * and ListResultEntrySchema along with an unknown property + *

    + * { + * "paths": [ + * { + * "contentLength": "0", + * "unknownProperty": "132374934429527192", + * "etag": "0x8D8186452785ADA", + * "group": "$superuser", + * "lastModified": "Wed, 24 Jun 2020 17:30:43 GMT", + * "name": "dest/filename", + * "owner": "$superuser", + * "permissions": "rw-r--r--" + * } + * ] + * } + */ + @Test + public void testJSONWithUnknownFields() throws IOException { + + String matchingJson = "{ \"paths\": [ { \"contentLength\": \"0\", " + + "\"unknownProperty\": \"132374934429527192\", \"etag\": " + + "\"0x8D8186452785ADA\", \"group\": \"$superuser\", " + + "\"lastModified\": \"Wed, 24 Jun 2020 17:30:43 GMT\", \"name\": " + + "\"dest/filename\", \"owner\": \"$superuser\", \"permissions\": " + + "\"rw-r--r--\" } ] } "; + + final ObjectMapper objectMapper = new ObjectMapper(); + final ListResultSchema listResultSchema = objectMapper + .readValue(matchingJson, ListResultSchema.class); + + assertThat(listResultSchema.paths().size()) + .describedAs("Only one path is expected as present in the input JSON") + .isEqualTo(1); + + ListResultEntrySchema path = listResultSchema.paths().get(0); + assertThat(path.contentLength()) + .describedAs("contentLength should match the value in the input JSON") + .isEqualTo(0L); + assertThat(path.eTag()) + .describedAs("eTag should match the value in the input JSON") + .isEqualTo("0x8D8186452785ADA"); + assertThat(path.group()) + .describedAs("group should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.lastModified()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("Wed, 24 Jun 2020 17:30:43 GMT"); + assertThat(path.name()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("dest/filename"); + assertThat(path.owner()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("$superuser"); + assertThat(path.permissions()) + .describedAs("lastModified should match the value in the input JSON") + .isEqualTo("rw-r--r--"); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java index f87fc654f0908..f74e3e3b3e9a7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java @@ -22,7 +22,7 @@ import java.net.URI; import java.nio.charset.Charset; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java new file mode 100644 index 0000000000000..cf7d51da4c44a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockDelegationSASTokenProvider.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.extensions; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; +import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; +import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriException; +import org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpHeader; +import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation; +import org.apache.hadoop.fs.azurebfs.utils.Base64; +import org.apache.hadoop.fs.azurebfs.utils.DelegationSASGenerator; +import org.apache.hadoop.fs.azurebfs.utils.SASGenerator; +import org.apache.hadoop.security.AccessControlException; + +/** + * A mock SAS token provider implementation + */ +public class MockDelegationSASTokenProvider implements SASTokenProvider { + + private DelegationSASGenerator generator; + + public static final String TEST_OWNER = "325f1619-4205-432f-9fce-3fd594325ce5"; + public static final String CORRELATION_ID = "66ff4ffc-ff17-417e-a2a9-45db8c5b0b5c"; + public static final String NO_AGENT_PATH = "NoAgentPath"; + + @Override + public void initialize(Configuration configuration, String accountName) throws IOException { + String appID = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_ID); + String appSecret = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_SECRET); + String sktid = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID); + String skoid = configuration.get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID); + String skt = SASGenerator.ISO_8601_FORMATTER.format(Instant.now().minus(SASGenerator.FIVE_MINUTES)); + String ske = SASGenerator.ISO_8601_FORMATTER.format(Instant.now().plus(SASGenerator.ONE_DAY)); + String skv = SASGenerator.AuthenticationVersion.Dec19.toString(); + + byte[] key = getUserDelegationKey(accountName, appID, appSecret, sktid, skt, ske, skv); + + generator = new DelegationSASGenerator(key, skoid, sktid, skt, ske, skv); + } + + // Invokes the AAD v2.0 authentication endpoint with a client credentials grant to get an + // access token. See https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-client-creds-grant-flow. 
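The comment above outlines the client-credentials token flow the mock provider relies on. As a minimal sketch (not part of this patch), the service principal settings that MockDelegationSASTokenProvider.initialize() reads could be supplied through a Configuration as below; the key names are the TestConfigurationKeys constants added earlier in this patch, while the class name and all values are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys;

public class DelegationSasTestConfSketch {
  // Builds a Configuration carrying the AAD service principal used to request
  // the user delegation key; every value below is a placeholder.
  public static Configuration build() {
    Configuration conf = new Configuration();
    conf.set(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID, "<tenant-guid>");
    conf.set(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID, "<object-guid>");
    conf.set(TestConfigurationKeys.FS_AZURE_TEST_APP_ID, "<client-id>");
    conf.set(TestConfigurationKeys.FS_AZURE_TEST_APP_SECRET, "<client-secret>");
    return conf;
  }
}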
+ private String getAuthorizationHeader(String accountName, String appID, String appSecret, String sktid) throws IOException { + String authEndPoint = String.format("https://login.microsoftonline.com/%s/oauth2/v2.0/token", sktid); + ClientCredsTokenProvider provider = new ClientCredsTokenProvider(authEndPoint, appID, appSecret); + return "Bearer " + provider.getToken().getAccessToken(); + } + + private byte[] getUserDelegationKey(String accountName, String appID, String appSecret, + String sktid, String skt, String ske, String skv) throws IOException { + + String method = "POST"; + String account = accountName.substring(0, accountName.indexOf(AbfsHttpConstants.DOT)); + + final StringBuilder sb = new StringBuilder(128); + sb.append("https://"); + sb.append(account); + sb.append(".blob.core.windows.net/?restype=service&comp=userdelegationkey"); + + URL url; + try { + url = new URL(sb.toString()); + } catch (MalformedURLException ex) { + throw new InvalidUriException(sb.toString()); + } + + List requestHeaders = new ArrayList(); + requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_VERSION, skv)); + requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.CONTENT_TYPE, "application/x-www-form-urlencoded")); + requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.AUTHORIZATION, getAuthorizationHeader(account, appID, appSecret, sktid))); + + final StringBuilder requestBody = new StringBuilder(512); + requestBody.append(""); + requestBody.append(skt); + requestBody.append(""); + requestBody.append(ske); + requestBody.append(""); + + AbfsHttpOperation op = new AbfsHttpOperation(url, method, requestHeaders); + + byte[] requestBuffer = requestBody.toString().getBytes(StandardCharsets.UTF_8.toString()); + op.sendRequest(requestBuffer, 0, requestBuffer.length); + + byte[] responseBuffer = new byte[4 * 1024]; + op.processResponse(responseBuffer, 0, responseBuffer.length); + + String responseBody = new String(responseBuffer, 0, (int) op.getBytesReceived(), StandardCharsets.UTF_8); + int beginIndex = responseBody.indexOf("") + "".length(); + int endIndex = responseBody.indexOf(""); + String value = responseBody.substring(beginIndex, endIndex); + return Base64.decode(value); + } + + /** + * Invokes the authorizer to obtain a SAS token. + * + * @param accountName the name of the storage account. + * @param fileSystem the name of the fileSystem. + * @param path the file or directory path. + * @param operation the operation to be performed on the path. + * @return a SAS token to perform the request operation. + * @throws IOException if there is a network error. + * @throws AccessControlException if access is denied. + */ + @Override + public String getSASToken(String accountName, String fileSystem, String path, + String operation) throws IOException, AccessControlException { + // Except for the special case where we test without an agent, + // the user for these tests is always TEST_OWNER. The check access operation + // requires suoid to check permissions for the user and will throw if the + // user does not have access and otherwise succeed. + String saoid = null; + String suoid = null; + if (path == null || !path.endsWith(NO_AGENT_PATH)) { + saoid = (operation == SASTokenProvider.CHECK_ACCESS_OPERATION) ? null : TEST_OWNER; + suoid = (operation == SASTokenProvider.CHECK_ACCESS_OPERATION) ? 
TEST_OWNER : null; + } + return generator.getDelegationSAS(accountName, fileSystem, path, operation, + saoid, suoid, CORRELATION_ID); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockSASTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockSASTokenProvider.java index de841b0b29909..50ac20970f45f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockSASTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/MockSASTokenProvider.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.fs.azurebfs.utils.Base64; -import org.apache.hadoop.fs.azurebfs.utils.SASGenerator; +import org.apache.hadoop.fs.azurebfs.utils.ServiceSASGenerator; /** * A mock SAS token provider implementation @@ -33,7 +33,7 @@ public class MockSASTokenProvider implements SASTokenProvider { private byte[] accountKey; - private SASGenerator generator; + private ServiceSASGenerator generator; private boolean skipAuthorizationForTestSetup = false; // For testing we use a container SAS for all operations. @@ -49,7 +49,7 @@ public void initialize(Configuration configuration, String accountName) throws I } catch (Exception ex) { throw new IOException(ex); } - generator = new SASGenerator(accountKey); + generator = new ServiceSASGenerator(accountKey); } /** diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/StubAbfsTokenIdentifier.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/StubAbfsTokenIdentifier.java index 4271ba6dfdf56..b04517c8ac4df 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/StubAbfsTokenIdentifier.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/StubAbfsTokenIdentifier.java @@ -29,7 +29,7 @@ import java.util.Objects; import java.util.UUID; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java new file mode 100644 index 0000000000000..44b0a362dc6c9 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java @@ -0,0 +1,256 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.Map; +import java.util.Random; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; +import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; +import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystemStore; +import org.junit.Test; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; + +public class ITestAbfsInputStream extends AbstractAbfsIntegrationTest { + + protected static final int HUNDRED = 100; + + public ITestAbfsInputStream() throws Exception { + } + + @Test + public void testWithNoOptimization() throws Exception { + for (int i = 2; i <= 7; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(false, false, fileSize); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + testWithNoOptimization(fs, testFilePath, HUNDRED, fileContent); + } + } + + protected void testWithNoOptimization(final FileSystem fs, + final Path testFilePath, final int seekPos, final byte[] fileContent) + throws IOException { + FSDataInputStream iStream = fs.open(testFilePath); + try { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + + iStream = new FSDataInputStream(abfsInputStream); + seek(iStream, seekPos); + long totalBytesRead = 0; + int length = HUNDRED * HUNDRED; + do { + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + totalBytesRead += bytesRead; + if ((totalBytesRead + seekPos) >= fileContent.length) { + length = (fileContent.length - seekPos) % length; + } + assertEquals(length, bytesRead); + assertContentReadCorrectly(fileContent, + (int) (seekPos + totalBytesRead - length), length, buffer); + + assertTrue(abfsInputStream.getFCursor() >= seekPos + totalBytesRead); + assertTrue(abfsInputStream.getFCursorAfterLastRead() >= seekPos + totalBytesRead); + assertTrue(abfsInputStream.getBCursor() >= totalBytesRead % abfsInputStream.getBufferSize()); + assertTrue(abfsInputStream.getLimit() >= totalBytesRead % abfsInputStream.getBufferSize()); + } while (totalBytesRead + seekPos < fileContent.length); + } finally { + iStream.close(); + } + } + + @Test + public void testExceptionInOptimization() throws Exception { + for (int i = 2; i <= 7; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(true, true, fileSize); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, 
fileContent); + testExceptionInOptimization(fs, testFilePath, fileSize - HUNDRED, + fileSize / 4, fileContent); + } + } + + private void testExceptionInOptimization(final FileSystem fs, + final Path testFilePath, + final int seekPos, final int length, final byte[] fileContent) + throws IOException { + + FSDataInputStream iStream = fs.open(testFilePath); + try { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + abfsInputStream = spy(abfsInputStream); + doThrow(new IOException()) + .doCallRealMethod() + .when(abfsInputStream) + .readRemote(anyLong(), any(), anyInt(), anyInt()); + + iStream = new FSDataInputStream(abfsInputStream); + verifyBeforeSeek(abfsInputStream); + seek(iStream, seekPos); + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + long actualLength = length; + if (seekPos + length > fileContent.length) { + long delta = seekPos + length - fileContent.length; + actualLength = length - delta; + } + assertEquals(bytesRead, actualLength); + assertContentReadCorrectly(fileContent, seekPos, (int) actualLength, buffer); + assertEquals(fileContent.length, abfsInputStream.getFCursor()); + assertEquals(fileContent.length, abfsInputStream.getFCursorAfterLastRead()); + assertEquals(actualLength, abfsInputStream.getBCursor()); + assertTrue(abfsInputStream.getLimit() >= actualLength); + } finally { + iStream.close(); + } + } + + protected AzureBlobFileSystem getFileSystem(boolean readSmallFilesCompletely) + throws IOException { + final AzureBlobFileSystem fs = getFileSystem(); + getAbfsStore(fs).getAbfsConfiguration() + .setReadSmallFilesCompletely(readSmallFilesCompletely); + return fs; + } + + private AzureBlobFileSystem getFileSystem(boolean optimizeFooterRead, + boolean readSmallFileCompletely, int fileSize) throws IOException { + final AzureBlobFileSystem fs = getFileSystem(); + getAbfsStore(fs).getAbfsConfiguration() + .setOptimizeFooterRead(optimizeFooterRead); + if (fileSize <= getAbfsStore(fs).getAbfsConfiguration() + .getReadBufferSize()) { + getAbfsStore(fs).getAbfsConfiguration() + .setReadSmallFilesCompletely(readSmallFileCompletely); + } + return fs; + } + + protected byte[] getRandomBytesArray(int length) { + final byte[] b = new byte[length]; + new Random().nextBytes(b); + return b; + } + + protected Path createFileWithContent(FileSystem fs, String fileName, + byte[] fileContent) throws IOException { + Path testFilePath = path(fileName); + try (FSDataOutputStream oStream = fs.create(testFilePath)) { + oStream.write(fileContent); + oStream.flush(); + } + return testFilePath; + } + + protected AzureBlobFileSystemStore getAbfsStore(FileSystem fs) + throws NoSuchFieldException, IllegalAccessException { + AzureBlobFileSystem abfs = (AzureBlobFileSystem) fs; + Field abfsStoreField = AzureBlobFileSystem.class + .getDeclaredField("abfsStore"); + abfsStoreField.setAccessible(true); + return (AzureBlobFileSystemStore) abfsStoreField.get(abfs); + } + + protected Map getInstrumentationMap(FileSystem fs) + throws NoSuchFieldException, IllegalAccessException { + AzureBlobFileSystem abfs = (AzureBlobFileSystem) fs; + Field abfsCountersField = AzureBlobFileSystem.class + .getDeclaredField("abfsCounters"); + abfsCountersField.setAccessible(true); + AbfsCounters abfsCounters = (AbfsCounters) abfsCountersField.get(abfs); + return abfsCounters.toMap(); + } + + protected void assertContentReadCorrectly(byte[] actualFileContent, int from, + int len, byte[] contentRead) { + for (int i = 0; i < len; i++) { + 
assertEquals(contentRead[i], actualFileContent[i + from]); + } + } + + protected void assertBuffersAreNotEqual(byte[] actualContent, + byte[] contentRead, AbfsConfiguration conf) { + assertBufferEquality(actualContent, contentRead, conf, false); + } + + protected void assertBuffersAreEqual(byte[] actualContent, byte[] contentRead, + AbfsConfiguration conf) { + assertBufferEquality(actualContent, contentRead, conf, true); + } + + private void assertBufferEquality(byte[] actualContent, byte[] contentRead, + AbfsConfiguration conf, boolean assertEqual) { + int bufferSize = conf.getReadBufferSize(); + int actualContentSize = actualContent.length; + int n = (actualContentSize < bufferSize) ? actualContentSize : bufferSize; + int matches = 0; + for (int i = 0; i < n; i++) { + if (actualContent[i] == contentRead[i]) { + matches++; + } + } + if (assertEqual) { + assertEquals(n, matches); + } else { + assertNotEquals(n, matches); + } + } + + protected void seek(FSDataInputStream iStream, long seekPos) + throws IOException { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream.getWrappedStream(); + verifyBeforeSeek(abfsInputStream); + iStream.seek(seekPos); + verifyAfterSeek(abfsInputStream, seekPos); + } + + private void verifyBeforeSeek(AbfsInputStream abfsInputStream){ + assertEquals(0, abfsInputStream.getFCursor()); + assertEquals(-1, abfsInputStream.getFCursorAfterLastRead()); + assertEquals(0, abfsInputStream.getLimit()); + assertEquals(0, abfsInputStream.getBCursor()); + } + + private void verifyAfterSeek(AbfsInputStream abfsInputStream, long seekPos){ + assertEquals(seekPos, abfsInputStream.getFCursor()); + assertEquals(-1, abfsInputStream.getFCursorAfterLastRead()); + assertEquals(0, abfsInputStream.getLimit()); + assertEquals(0, abfsInputStream.getBCursor()); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java new file mode 100644 index 0000000000000..09a810c5fc9e8 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java @@ -0,0 +1,358 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; + +import static java.lang.Math.max; +import static java.lang.Math.min; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_KB; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; + +public class ITestAbfsInputStreamReadFooter extends ITestAbfsInputStream { + + private static final int TEN = 10; + private static final int TWENTY = 20; + + public ITestAbfsInputStreamReadFooter() throws Exception { + } + + @Test + public void testOnlyOneServerCallIsMadeWhenTheConfIsTrue() throws Exception { + testNumBackendCalls(true); + } + + @Test + public void testMultipleServerCallsAreMadeWhenTheConfIsFalse() + throws Exception { + testNumBackendCalls(false); + } + + private void testNumBackendCalls(boolean optimizeFooterRead) + throws Exception { + for (int i = 1; i <= 4; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(optimizeFooterRead, + fileSize); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + int length = AbfsInputStream.FOOTER_SIZE; + try (FSDataInputStream iStream = fs.open(testFilePath)) { + byte[] buffer = new byte[length]; + + Map metricMap = getInstrumentationMap(fs); + long requestsMadeBeforeTest = metricMap + .get(CONNECTIONS_MADE.getStatName()); + + iStream.seek(fileSize - 8); + iStream.read(buffer, 0, length); + + iStream.seek(fileSize - (TEN * ONE_KB)); + iStream.read(buffer, 0, length); + + iStream.seek(fileSize - (TWENTY * ONE_KB)); + iStream.read(buffer, 0, length); + + metricMap = getInstrumentationMap(fs); + long requestsMadeAfterTest = metricMap + .get(CONNECTIONS_MADE.getStatName()); + + if (optimizeFooterRead) { + assertEquals(1, requestsMadeAfterTest - requestsMadeBeforeTest); + } else { + assertEquals(3, requestsMadeAfterTest - requestsMadeBeforeTest); + } + } + } + } + + @Test + public void testSeekToBeginAndReadWithConfTrue() throws Exception { + testSeekAndReadWithConf(true, SeekTo.BEGIN); + } + + @Test + public void testSeekToBeginAndReadWithConfFalse() throws Exception { + testSeekAndReadWithConf(false, SeekTo.BEGIN); + } + + @Test + public void testSeekToBeforeFooterAndReadWithConfTrue() throws Exception { + testSeekAndReadWithConf(true, SeekTo.BEFORE_FOOTER_START); + } + + @Test + public void testSeekToBeforeFooterAndReadWithConfFalse() throws Exception { + testSeekAndReadWithConf(false, SeekTo.BEFORE_FOOTER_START); + } + + @Test + 
public void testSeekToFooterAndReadWithConfTrue() throws Exception { + testSeekAndReadWithConf(true, SeekTo.AT_FOOTER_START); + } + + @Test + public void testSeekToFooterAndReadWithConfFalse() throws Exception { + testSeekAndReadWithConf(false, SeekTo.AT_FOOTER_START); + } + + @Test + public void testSeekToAfterFooterAndReadWithConfTrue() throws Exception { + testSeekAndReadWithConf(true, SeekTo.AFTER_FOOTER_START); + } + + @Test + public void testSeekToToAfterFooterAndReadWithConfFalse() throws Exception { + testSeekAndReadWithConf(false, SeekTo.AFTER_FOOTER_START); + } + + @Test + public void testSeekToEndAndReadWithConfTrue() throws Exception { + testSeekAndReadWithConf(true, SeekTo.END); + } + + @Test + public void testSeekToEndAndReadWithConfFalse() throws Exception { + testSeekAndReadWithConf(false, SeekTo.END); + } + + private void testSeekAndReadWithConf(boolean optimizeFooterRead, + SeekTo seekTo) throws Exception { + for (int i = 2; i <= 6; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(optimizeFooterRead, + fileSize); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + seekReadAndTest(fs, testFilePath, seekPos(seekTo, fileSize), HUNDRED, + fileContent); + } + } + + private int seekPos(SeekTo seekTo, int fileSize) { + if (seekTo == SeekTo.BEGIN) { + return 0; + } + if (seekTo == SeekTo.BEFORE_FOOTER_START) { + return fileSize - AbfsInputStream.FOOTER_SIZE - 1; + } + if (seekTo == SeekTo.AT_FOOTER_START) { + return fileSize - AbfsInputStream.FOOTER_SIZE; + } + if (seekTo == SeekTo.END) { + return fileSize - 1; + } + //seekTo == SeekTo.AFTER_FOOTER_START + return fileSize - AbfsInputStream.FOOTER_SIZE + 1; + } + + private void seekReadAndTest(final FileSystem fs, final Path testFilePath, + final int seekPos, final int length, final byte[] fileContent) + throws IOException, NoSuchFieldException, IllegalAccessException { + AbfsConfiguration conf = getAbfsStore(fs).getAbfsConfiguration(); + long actualContentLength = fileContent.length; + try (FSDataInputStream iStream = fs.open(testFilePath)) { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + long bufferSize = abfsInputStream.getBufferSize(); + seek(iStream, seekPos); + byte[] buffer = new byte[length]; + long bytesRead = iStream.read(buffer, 0, length); + + long footerStart = max(0, + actualContentLength - AbfsInputStream.FOOTER_SIZE); + boolean optimizationOn = + conf.optimizeFooterRead() && seekPos >= footerStart; + + long actualLength = length; + if (seekPos + length > actualContentLength) { + long delta = seekPos + length - actualContentLength; + actualLength = length - delta; + } + long expectedLimit; + long expectedBCurson; + long expectedFCursor; + if (optimizationOn) { + if (actualContentLength <= bufferSize) { + expectedLimit = actualContentLength; + expectedBCurson = seekPos + actualLength; + } else { + expectedLimit = bufferSize; + long lastBlockStart = max(0, actualContentLength - bufferSize); + expectedBCurson = seekPos - lastBlockStart + actualLength; + } + expectedFCursor = actualContentLength; + } else { + if (seekPos + bufferSize < actualContentLength) { + expectedLimit = bufferSize; + expectedFCursor = bufferSize; + } else { + expectedLimit = actualContentLength - seekPos; + expectedFCursor = min(seekPos + bufferSize, actualContentLength); + } + expectedBCurson = actualLength; + } + + assertEquals(expectedFCursor, 
abfsInputStream.getFCursor()); + assertEquals(expectedFCursor, abfsInputStream.getFCursorAfterLastRead()); + assertEquals(expectedLimit, abfsInputStream.getLimit()); + assertEquals(expectedBCurson, abfsInputStream.getBCursor()); + assertEquals(actualLength, bytesRead); + // Verify user-content read + assertContentReadCorrectly(fileContent, seekPos, (int) actualLength, buffer); + // Verify data read to AbfsInputStream buffer + int from = seekPos; + if (optimizationOn) { + from = (int) max(0, actualContentLength - bufferSize); + } + assertContentReadCorrectly(fileContent, from, (int) abfsInputStream.getLimit(), + abfsInputStream.getBuffer()); + } + } + + @Test + public void testPartialReadWithNoData() + throws Exception { + for (int i = 2; i <= 6; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(true, fileSize); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + testPartialReadWithNoData(fs, testFilePath, + fileSize - AbfsInputStream.FOOTER_SIZE, AbfsInputStream.FOOTER_SIZE, + fileContent); + } + } + + private void testPartialReadWithNoData(final FileSystem fs, + final Path testFilePath, final int seekPos, final int length, + final byte[] fileContent) + throws IOException, NoSuchFieldException, IllegalAccessException { + FSDataInputStream iStream = fs.open(testFilePath); + try { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + abfsInputStream = spy(abfsInputStream); + doReturn(10).doReturn(10).doCallRealMethod().when(abfsInputStream) + .readRemote(anyLong(), any(), anyInt(), anyInt()); + + iStream = new FSDataInputStream(abfsInputStream); + seek(iStream, seekPos); + + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + assertEquals(length, bytesRead); + assertContentReadCorrectly(fileContent, seekPos, length, buffer); + assertEquals(fileContent.length, abfsInputStream.getFCursor()); + assertEquals(length, abfsInputStream.getBCursor()); + assertTrue(abfsInputStream.getLimit() >= length); + } finally { + iStream.close(); + } + } + + @Test + public void testPartialReadWithSomeDat() + throws Exception { + for (int i = 3; i <= 6; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(true, fileSize); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + testPartialReadWithSomeDat(fs, testFilePath, + fileSize - AbfsInputStream.FOOTER_SIZE, AbfsInputStream.FOOTER_SIZE, + fileContent); + } + } + + private void testPartialReadWithSomeDat(final FileSystem fs, + final Path testFilePath, final int seekPos, final int length, + final byte[] fileContent) + throws IOException, NoSuchFieldException, IllegalAccessException { + FSDataInputStream iStream = fs.open(testFilePath); + try { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + abfsInputStream = spy(abfsInputStream); + // first readRemote, will return first 10 bytes + // second readRemote returns data till the last 2 bytes + int someDataLength = 2; + int secondReturnSize = + min(fileContent.length, abfsInputStream.getBufferSize()) - 10 + - someDataLength; + doReturn(10).doReturn(secondReturnSize).doCallRealMethod() + .when(abfsInputStream) + .readRemote(anyLong(), any(), anyInt(), anyInt()); + + iStream = new 
FSDataInputStream(abfsInputStream); + seek(iStream, seekPos); + + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + assertEquals(length, bytesRead); + assertEquals(fileContent.length, abfsInputStream.getFCursor()); + // someDataLength(2), because in the do-while loop in read, the 2nd loop + // will go to readoneblock and that resets the bCursor to 0 as + // bCursor == limit finally when the 2 bytes are read bCursor and limit + // will be at someDataLength(2) + assertEquals(someDataLength, abfsInputStream.getBCursor()); + assertEquals(someDataLength, abfsInputStream.getLimit()); + } finally { + iStream.close(); + } + } + + private AzureBlobFileSystem getFileSystem(boolean optimizeFooterRead, + int fileSize) throws IOException { + final AzureBlobFileSystem fs = getFileSystem(); + getAbfsStore(fs).getAbfsConfiguration() + .setOptimizeFooterRead(optimizeFooterRead); + if (fileSize <= getAbfsStore(fs).getAbfsConfiguration() + .getReadBufferSize()) { + getAbfsStore(fs).getAbfsConfiguration() + .setReadSmallFilesCompletely(false); + } + return fs; + } + + private enum SeekTo { + BEGIN, AT_FOOTER_START, BEFORE_FOOTER_START, AFTER_FOOTER_START, END + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java new file mode 100644 index 0000000000000..ff03c0e78f4a3 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java @@ -0,0 +1,326 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.Map; + +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; + +import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.CONNECTIONS_MADE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_KB; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; + +public class ITestAbfsInputStreamSmallFileReads extends ITestAbfsInputStream { + + public ITestAbfsInputStreamSmallFileReads() throws Exception { + } + + @Test + public void testOnlyOneServerCallIsMadeWhenTheConfIsTrue() throws Exception { + testNumBackendCalls(true); + } + + @Test + public void testMultipleServerCallsAreMadeWhenTheConfIsFalse() + throws Exception { + testNumBackendCalls(false); + } + + private void testNumBackendCalls(boolean readSmallFilesCompletely) + throws Exception { + final AzureBlobFileSystem fs = getFileSystem(readSmallFilesCompletely); + for (int i = 1; i <= 4; i++) { + String fileName = methodName.getMethodName() + i; + int fileSize = i * ONE_MB; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + int length = ONE_KB; + try (FSDataInputStream iStream = fs.open(testFilePath)) { + byte[] buffer = new byte[length]; + + Map metricMap = getInstrumentationMap(fs); + long requestsMadeBeforeTest = metricMap + .get(CONNECTIONS_MADE.getStatName()); + + iStream.seek(seekPos(SeekTo.END, fileSize, length)); + iStream.read(buffer, 0, length); + + iStream.seek(seekPos(SeekTo.MIDDLE, fileSize, length)); + iStream.read(buffer, 0, length); + + iStream.seek(seekPos(SeekTo.BEGIN, fileSize, length)); + iStream.read(buffer, 0, length); + + metricMap = getInstrumentationMap(fs); + long requestsMadeAfterTest = metricMap + .get(CONNECTIONS_MADE.getStatName()); + + if (readSmallFilesCompletely) { + assertEquals(1, requestsMadeAfterTest - requestsMadeBeforeTest); + } else { + assertEquals(3, requestsMadeAfterTest - requestsMadeBeforeTest); + } + } + } + } + + @Test + public void testSeekToBeginingAndReadSmallFileWithConfTrue() + throws Exception { + testSeekAndReadWithConf(SeekTo.BEGIN, 2, 4, true); + } + + @Test + public void testSeekToBeginingAndReadSmallFileWithConfFalse() + throws Exception { + testSeekAndReadWithConf(SeekTo.BEGIN, 2, 4, false); + } + + @Test + public void testSeekToBeginingAndReadBigFileWithConfTrue() throws Exception { + testSeekAndReadWithConf(SeekTo.BEGIN, 5, 6, true); + } + + @Test + public void testSeekToBeginingAndReadBigFileWithConfFalse() throws Exception { + testSeekAndReadWithConf(SeekTo.BEGIN, 5, 6, false); + } + + @Test + public void testSeekToEndAndReadSmallFileWithConfTrue() throws Exception { + 
testSeekAndReadWithConf(SeekTo.END, 2, 4, true); + } + + @Test + public void testSeekToEndAndReadSmallFileWithConfFalse() throws Exception { + testSeekAndReadWithConf(SeekTo.END, 2, 4, false); + } + + @Test + public void testSeekToEndAndReadBigFileWithConfTrue() throws Exception { + testSeekAndReadWithConf(SeekTo.END, 5, 6, true); + } + + @Test + public void testSeekToEndAndReaBigFiledWithConfFalse() throws Exception { + testSeekAndReadWithConf(SeekTo.END, 5, 6, false); + } + + @Test + public void testSeekToMiddleAndReadSmallFileWithConfTrue() throws Exception { + testSeekAndReadWithConf(SeekTo.MIDDLE, 2, 4, true); + } + + @Test + public void testSeekToMiddleAndReadSmallFileWithConfFalse() throws Exception { + testSeekAndReadWithConf(SeekTo.MIDDLE, 2, 4, false); + } + + @Test + public void testSeekToMiddleAndReaBigFileWithConfTrue() throws Exception { + testSeekAndReadWithConf(SeekTo.MIDDLE, 5, 6, true); + } + + @Test + public void testSeekToMiddleAndReadBigFileWithConfFalse() throws Exception { + testSeekAndReadWithConf(SeekTo.MIDDLE, 5, 6, false); + } + + private void testSeekAndReadWithConf(SeekTo seekTo, int startFileSizeInMB, + int endFileSizeInMB, boolean readSmallFilesCompletely) throws Exception { + final AzureBlobFileSystem fs = getFileSystem(readSmallFilesCompletely); + for (int i = startFileSizeInMB; i <= endFileSizeInMB; i++) { + String fileName = methodName.getMethodName() + i; + int fileSize = i * ONE_MB; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + int length = ONE_KB; + int seekPos = seekPos(seekTo, fileSize, length); + seekReadAndTest(fs, testFilePath, seekPos, length, fileContent); + } + } + + private int seekPos(SeekTo seekTo, int fileSize, int length) { + if (seekTo == SeekTo.BEGIN) { + return 0; + } + if (seekTo == SeekTo.END) { + return fileSize - length; + } + return fileSize / 2; + } + + private void seekReadAndTest(FileSystem fs, Path testFilePath, int seekPos, + int length, byte[] fileContent) + throws IOException, NoSuchFieldException, IllegalAccessException { + AbfsConfiguration conf = getAbfsStore(fs).getAbfsConfiguration(); + try (FSDataInputStream iStream = fs.open(testFilePath)) { + seek(iStream, seekPos); + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + assertEquals(bytesRead, length); + assertContentReadCorrectly(fileContent, seekPos, length, buffer); + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + + final int readBufferSize = conf.getReadBufferSize(); + final int fileContentLength = fileContent.length; + final boolean smallFile = fileContentLength <= readBufferSize; + int expectedLimit, expectedFCursor; + int expectedBCursor; + if (conf.readSmallFilesCompletely() && smallFile) { + assertBuffersAreEqual(fileContent, abfsInputStream.getBuffer(), conf); + expectedFCursor = fileContentLength; + expectedLimit = fileContentLength; + expectedBCursor = seekPos + length; + } else { + if ((seekPos == 0)) { + assertBuffersAreEqual(fileContent, abfsInputStream.getBuffer(), conf); + } else { + assertBuffersAreNotEqual(fileContent, abfsInputStream.getBuffer(), + conf); + } + expectedBCursor = length; + expectedFCursor = (fileContentLength < (seekPos + readBufferSize)) + ? fileContentLength + : (seekPos + readBufferSize); + expectedLimit = (fileContentLength < (seekPos + readBufferSize)) + ? 
(fileContentLength - seekPos) + : readBufferSize; + } + assertEquals(expectedFCursor, abfsInputStream.getFCursor()); + assertEquals(expectedFCursor, abfsInputStream.getFCursorAfterLastRead()); + assertEquals(expectedBCursor, abfsInputStream.getBCursor()); + assertEquals(expectedLimit, abfsInputStream.getLimit()); + } + } + + @Test + public void testPartialReadWithNoData() throws Exception { + for (int i = 2; i <= 4; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(true); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + partialReadWithNoData(fs, testFilePath, fileSize / 2, fileSize / 4, + fileContent); + } + } + + private void partialReadWithNoData(final FileSystem fs, + final Path testFilePath, + final int seekPos, final int length, final byte[] fileContent) + throws IOException { + + FSDataInputStream iStream = fs.open(testFilePath); + try { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + abfsInputStream = spy(abfsInputStream); + doReturn(10) + .doReturn(10) + .doCallRealMethod() + .when(abfsInputStream) + .readRemote(anyLong(), any(), anyInt(), anyInt()); + + iStream = new FSDataInputStream(abfsInputStream); + seek(iStream, seekPos); + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + assertEquals(bytesRead, length); + assertContentReadCorrectly(fileContent, seekPos, length, buffer); + assertEquals(fileContent.length, abfsInputStream.getFCursor()); + assertEquals(fileContent.length, + abfsInputStream.getFCursorAfterLastRead()); + assertEquals(length, abfsInputStream.getBCursor()); + assertTrue(abfsInputStream.getLimit() >= length); + } finally { + iStream.close(); + } + } + + @Test + public void testPartialReadWithSomeData() throws Exception { + for (int i = 2; i <= 4; i++) { + int fileSize = i * ONE_MB; + final AzureBlobFileSystem fs = getFileSystem(true); + String fileName = methodName.getMethodName() + i; + byte[] fileContent = getRandomBytesArray(fileSize); + Path testFilePath = createFileWithContent(fs, fileName, fileContent); + partialReadWithSomeData(fs, testFilePath, fileSize / 2, + fileSize / 4, fileContent); + } + } + + private void partialReadWithSomeData(final FileSystem fs, + final Path testFilePath, + final int seekPos, final int length, final byte[] fileContent) + throws IOException, NoSuchFieldException, IllegalAccessException { + FSDataInputStream iStream = fs.open(testFilePath); + try { + AbfsInputStream abfsInputStream = (AbfsInputStream) iStream + .getWrappedStream(); + abfsInputStream = spy(abfsInputStream); + // first readRemote, will return first 10 bytes + // second readRemote, seekPos - someDataLength(10) will reach the + // seekPos as 10 bytes are already read in the first call. 
Plus + // someDataLength(10) + int someDataLength = 10; + int secondReturnSize = seekPos - 10 + someDataLength; + doReturn(10) + .doReturn(secondReturnSize) + .doCallRealMethod() + .when(abfsInputStream) + .readRemote(anyLong(), any(), anyInt(), anyInt()); + + iStream = new FSDataInputStream(abfsInputStream); + seek(iStream, seekPos); + + byte[] buffer = new byte[length]; + int bytesRead = iStream.read(buffer, 0, length); + assertEquals(length, bytesRead); + assertTrue(abfsInputStream.getFCursor() > seekPos + length); + assertTrue(abfsInputStream.getFCursorAfterLastRead() > seekPos + length); + // Optimized read was no complete but it got some user requested data + // from server. So obviously the buffer will contain data more than + // seekPos + len + assertEquals(length - someDataLength, abfsInputStream.getBCursor()); + assertTrue(abfsInputStream.getLimit() > length - someDataLength); + } finally { + iStream.close(); + } + } + + private enum SeekTo {BEGIN, MIDDLE, END} + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java new file mode 100644 index 0000000000000..fff005114fbe0 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; +import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; +import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; + +/** + * Test create operation. + */ +public class ITestAbfsOutputStream extends AbstractAbfsIntegrationTest { + private static final Path TEST_FILE_PATH = new Path("testfile"); + + public ITestAbfsOutputStream() throws Exception { + super(); + } + + @Test + public void testMaxRequestsAndQueueCapacityDefaults() throws Exception { + Configuration conf = getRawConfiguration(); + final AzureBlobFileSystem fs = getFileSystem(conf); + try (FSDataOutputStream out = fs.create(TEST_FILE_PATH)) { + AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); + + int maxConcurrentRequests + = getConfiguration().getWriteMaxConcurrentRequestCount(); + if (stream.isAppendBlobStream()) { + maxConcurrentRequests = 1; + } + + Assertions.assertThat(stream.getMaxConcurrentRequestCount()).describedAs( + "maxConcurrentRequests should be " + maxConcurrentRequests) + .isEqualTo(maxConcurrentRequests); + Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()).describedAs( + "maxRequestsToQueue should be " + getConfiguration() + .getMaxWriteRequestsToQueue()) + .isEqualTo(getConfiguration().getMaxWriteRequestsToQueue()); + } + } + + @Test + public void testMaxRequestsAndQueueCapacity() throws Exception { + Configuration conf = getRawConfiguration(); + int maxConcurrentRequests = 6; + int maxRequestsToQueue = 10; + conf.set(ConfigurationKeys.AZURE_WRITE_MAX_CONCURRENT_REQUESTS, + "" + maxConcurrentRequests); + conf.set(ConfigurationKeys.AZURE_WRITE_MAX_REQUESTS_TO_QUEUE, + "" + maxRequestsToQueue); + final AzureBlobFileSystem fs = getFileSystem(conf); + FSDataOutputStream out = fs.create(TEST_FILE_PATH); + AbfsOutputStream stream = (AbfsOutputStream) out.getWrappedStream(); + + if (stream.isAppendBlobStream()) { + maxConcurrentRequests = 1; + } + + Assertions.assertThat(stream.getMaxConcurrentRequestCount()) + .describedAs("maxConcurrentRequests should be " + maxConcurrentRequests) + .isEqualTo(maxConcurrentRequests); + Assertions.assertThat(stream.getMaxRequestsThatCanBeQueued()) + .describedAs("maxRequestsToQueue should be " + maxRequestsToQueue) + .isEqualTo(maxRequestsToQueue); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java new file mode 100644 index 0000000000000..25f33db1cae9e --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java @@ -0,0 +1,233 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; + +import org.junit.Rule; +import org.junit.rules.TestName; +import org.junit.Test; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FutureDataInputStreamBuilder; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; +import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.statistics.IOStatistics; +import org.apache.hadoop.fs.statistics.StreamStatisticNames; +import org.assertj.core.api.Assertions; + +public class ITestAbfsPositionedRead extends AbstractAbfsIntegrationTest { + + private static final int TEST_FILE_DATA_SIZE = 100; + + @Rule + public TestName methodName = new TestName(); + + public ITestAbfsPositionedRead() throws Exception { + } + + @Test + public void testPositionedRead() throws IOException { + describe("Testing positioned reads in AbfsInputStream"); + Path dest = path(methodName.getMethodName()); + + byte[] data = ContractTestUtils.dataset(TEST_FILE_DATA_SIZE, 'a', 'z'); + ContractTestUtils.writeDataset(getFileSystem(), dest, data, data.length, + TEST_FILE_DATA_SIZE, true); + int bytesToRead = 10; + try (FSDataInputStream inputStream = getFileSystem().open(dest)) { + assertTrue( + "unexpected stream type " + + inputStream.getWrappedStream().getClass().getSimpleName(), + inputStream.getWrappedStream() instanceof AbfsInputStream); + byte[] readBuffer = new byte[bytesToRead]; + int readPos = 0; + Assertions + .assertThat(inputStream.read(readPos, readBuffer, 0, bytesToRead)) + .describedAs( + "AbfsInputStream pread did not read the correct number of bytes") + .isEqualTo(bytesToRead); + Assertions.assertThat(readBuffer) + .describedAs("AbfsInputStream pread did not read correct data") + .containsExactly( + Arrays.copyOfRange(data, readPos, readPos + bytesToRead)); + // Read only 10 bytes from offset 0. But by default it will do the seek + // and read where the entire 100 bytes get read into the + // AbfsInputStream buffer. 
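The comment above describes the default buffered behaviour, where a small positioned read still seeks and fills the AbfsInputStream buffer with the surrounding data. As a minimal sketch (not part of this patch), a caller can opt out of that per stream through the openFile() builder option exercised later in this test; the helper class name and the fs/path arguments are placeholders.

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;

public class UnbufferedPreadSketch {
  // Opens the file with buffered positioned reads disabled, so a pread fetches
  // only the requested bytes instead of filling the AbfsInputStream buffer.
  public static FSDataInputStream open(FileSystem fs, Path path) throws Exception {
    return fs.openFile(path)
        .opt(ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE, true)
        .build()
        .get();
  }
}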
+ Assertions + .assertThat(Arrays.copyOfRange( + ((AbfsInputStream) inputStream.getWrappedStream()).getBuffer(), 0, + TEST_FILE_DATA_SIZE)) + .describedAs( + "AbfsInputStream pread did not read more data into its buffer") + .containsExactly(data); + // Check statistics + assertStatistics(inputStream.getIOStatistics(), bytesToRead, 1, 1, + TEST_FILE_DATA_SIZE); + + readPos = 50; + Assertions + .assertThat(inputStream.read(readPos, readBuffer, 0, bytesToRead)) + .describedAs( + "AbfsInputStream pread did not read the correct number of bytes") + .isEqualTo(bytesToRead); + Assertions.assertThat(readBuffer) + .describedAs("AbfsInputStream pread did not read correct data") + .containsExactly( + Arrays.copyOfRange(data, readPos, readPos + bytesToRead)); + // Check statistics + assertStatistics(inputStream.getIOStatistics(), 2 * bytesToRead, 2, 1, + TEST_FILE_DATA_SIZE); + // Did positioned read from pos 0 and then 50 but the stream pos should + // remain at 0. + Assertions.assertThat(inputStream.getPos()) + .describedAs("AbfsInputStream positioned reads moved stream position") + .isEqualTo(0); + } + } + + private void assertStatistics(IOStatistics ioStatistics, + long expectedBytesRead, long expectedReadOps, long expectedRemoteReadOps, + long expectedRemoteReadBytes) { + Assertions + .assertThat(ioStatistics.counters() + .get(StreamStatisticNames.STREAM_READ_BYTES).longValue()) + .describedAs("Mismatch in bytesRead statistics") + .isEqualTo(expectedBytesRead); + Assertions + .assertThat(ioStatistics.counters() + .get(StreamStatisticNames.STREAM_READ_OPERATIONS).longValue()) + .describedAs("Mismatch in readOps statistics") + .isEqualTo(expectedReadOps); + Assertions + .assertThat(ioStatistics.counters() + .get(StreamStatisticNames.REMOTE_READ_OP).longValue()) + .describedAs("Mismatch in remoteReadOps statistics") + .isEqualTo(expectedRemoteReadOps); + Assertions + .assertThat(ioStatistics.counters() + .get(StreamStatisticNames.REMOTE_BYTES_READ).longValue()) + .describedAs("Mismatch in remoteReadBytes statistics") + .isEqualTo(expectedRemoteReadBytes); + } + + @Test + public void testPositionedReadWithBufferedReadDisabled() throws IOException { + describe("Testing positioned reads in AbfsInputStream with BufferedReadDisabled"); + Path dest = path(methodName.getMethodName()); + byte[] data = ContractTestUtils.dataset(TEST_FILE_DATA_SIZE, 'a', 'z'); + ContractTestUtils.writeDataset(getFileSystem(), dest, data, data.length, + TEST_FILE_DATA_SIZE, true); + FutureDataInputStreamBuilder builder = getFileSystem().openFile(dest); + builder.opt(ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE, true); + FSDataInputStream inputStream = null; + try { + inputStream = builder.build().get(); + } catch (IllegalArgumentException | UnsupportedOperationException + | InterruptedException | ExecutionException e) { + throw new IOException( + "Exception opening " + dest + " with FutureDataInputStreamBuilder", + e); + } + assertNotNull("Null InputStream over " + dest, inputStream); + int bytesToRead = 10; + try { + AbfsInputStream abfsIs = (AbfsInputStream) inputStream.getWrappedStream(); + byte[] readBuffer = new byte[bytesToRead]; + int readPos = 10; + Assertions + .assertThat(inputStream.read(readPos, readBuffer, 0, bytesToRead)) + .describedAs( + "AbfsInputStream pread did not read the correct number of bytes") + .isEqualTo(bytesToRead); + Assertions.assertThat(readBuffer) + .describedAs("AbfsInputStream pread did not read correct data") + .containsExactly( + Arrays.copyOfRange(data, readPos, readPos + 
bytesToRead)); + // Read only 10 bytes from offset 10. This time, as buffered pread is + // disabled, it will only read the exact bytes as requested and no data + // will get read into the AbfsInputStream#buffer. Infact the buffer won't + // even get initialized. + assertNull("AbfsInputStream pread caused the internal buffer creation", + abfsIs.getBuffer()); + // Check statistics + assertStatistics(inputStream.getIOStatistics(), bytesToRead, 1, 1, + bytesToRead); + readPos = 40; + Assertions + .assertThat(inputStream.read(readPos, readBuffer, 0, bytesToRead)) + .describedAs( + "AbfsInputStream pread did not read the correct number of bytes") + .isEqualTo(bytesToRead); + Assertions.assertThat(readBuffer) + .describedAs("AbfsInputStream pread did not read correct data") + .containsExactly( + Arrays.copyOfRange(data, readPos, readPos + bytesToRead)); + assertStatistics(inputStream.getIOStatistics(), 2 * bytesToRead, 2, 2, + 2 * bytesToRead); + // Now make a seek and read so that internal buffer gets created + inputStream.seek(0); + Assertions.assertThat(inputStream.read(readBuffer)).describedAs( + "AbfsInputStream seek+read did not read the correct number of bytes") + .isEqualTo(bytesToRead); + // This read would have fetched all 100 bytes into internal buffer. + Assertions + .assertThat(Arrays.copyOfRange( + ((AbfsInputStream) inputStream.getWrappedStream()).getBuffer(), 0, + TEST_FILE_DATA_SIZE)) + .describedAs( + "AbfsInputStream seek+read did not read more data into its buffer") + .containsExactly(data); + assertStatistics(inputStream.getIOStatistics(), 3 * bytesToRead, 3, 3, + TEST_FILE_DATA_SIZE + 2 * bytesToRead); + resetBuffer(abfsIs.getBuffer()); + // Now again do pos read and make sure not any extra data being fetched. + readPos = 0; + Assertions + .assertThat(inputStream.read(readPos, readBuffer, 0, bytesToRead)) + .describedAs( + "AbfsInputStream pread did not read the correct number of bytes") + .isEqualTo(bytesToRead); + Assertions.assertThat(readBuffer) + .describedAs("AbfsInputStream pread did not read correct data") + .containsExactly( + Arrays.copyOfRange(data, readPos, readPos + bytesToRead)); + Assertions + .assertThat(Arrays.copyOfRange( + ((AbfsInputStream) inputStream.getWrappedStream()).getBuffer(), 0, + TEST_FILE_DATA_SIZE)) + .describedAs( + "AbfsInputStream pread read more data into its buffer than expected") + .doesNotContain(data); + assertStatistics(inputStream.getIOStatistics(), 4 * bytesToRead, 4, 4, + TEST_FILE_DATA_SIZE + 3 * bytesToRead); + } finally { + inputStream.close(); + } + } + + private void resetBuffer(byte[] buf) { + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0; + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java index deca8b355a9f7..4facc10aeff0b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsClient.java @@ -18,19 +18,43 @@ package org.apache.hadoop.fs.azurebfs.services; +import java.io.IOException; +import java.lang.reflect.Field; +import java.net.MalformedURLException; import java.net.URL; +import java.util.List; import java.util.regex.Pattern; -import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; -import org.junit.Assert; import org.junit.Test; import 
org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; -import org.apache.hadoop.util.VersionInfo; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.APN_VERSION; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.CLIENT_VERSION; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.DOT; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.FORWARD_SLASH; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.JAVA_VENDOR; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.JAVA_VERSION; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.OS_ARCH; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.OS_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.OS_VERSION; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.SEMICOLON; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.SINGLE_WHITE_SPACE; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLUSTER_NAME; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLUSTER_TYPE; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_VALUE_UNKNOWN; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME; /** * Test useragent of abfs client. 
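The hunk that follows replaces the old per-test User-Agent regexes with a single pattern assembled from these constants. As a concrete reference point, the value below is the sample User-Agent hard-wired into getMockAbfsClient later in this file; it is quoted here only to show the shape that pattern is meant to match, not output from a live run:

    // Sample only, copied verbatim from the mocked client further down in this patch:
    String sampleUserAgent = "APN/1.0 Azure Blob FS/3.4.0-SNAPSHOT (PrivateBuild "
        + "JavaJRE 1.8.0_252; Linux 5.3.0-59-generic/amd64; openssl-1.0; "
        + "UNKNOWN/UNKNOWN) MSFT";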
@@ -38,60 +62,339 @@ */ public final class TestAbfsClient { - private final String accountName = "bogusAccountName.dfs.core.windows.net"; + private static final String ACCOUNT_NAME = "bogusAccountName.dfs.core.windows.net"; + private static final String FS_AZURE_USER_AGENT_PREFIX = "Partner Service"; - private void validateUserAgent(String expectedPattern, - URL baseUrl, - AbfsConfiguration config, - boolean includeSSLProvider) - throws AzureBlobFileSystemException { - AbfsClient client = new AbfsClient(baseUrl, null, - config, null, (AccessTokenProvider) null, null); + private final Pattern userAgentStringPattern; + + public TestAbfsClient(){ + StringBuilder regEx = new StringBuilder(); + regEx.append("^"); + regEx.append(APN_VERSION); + regEx.append(SINGLE_WHITE_SPACE); + regEx.append(CLIENT_VERSION); + regEx.append(SINGLE_WHITE_SPACE); + regEx.append("\\("); + regEx.append(System.getProperty(JAVA_VENDOR) + .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)); + regEx.append(SINGLE_WHITE_SPACE); + regEx.append("JavaJRE"); + regEx.append(SINGLE_WHITE_SPACE); + regEx.append(System.getProperty(JAVA_VERSION)); + regEx.append(SEMICOLON); + regEx.append(SINGLE_WHITE_SPACE); + regEx.append(System.getProperty(OS_NAME) + .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)); + regEx.append(SINGLE_WHITE_SPACE); + regEx.append(System.getProperty(OS_VERSION)); + regEx.append(FORWARD_SLASH); + regEx.append(System.getProperty(OS_ARCH)); + regEx.append(SEMICOLON); + regEx.append("([a-zA-Z].*; )?"); // Regex for sslProviderName + regEx.append("([a-zA-Z].*; )?"); // Regex for tokenProvider + regEx.append(" ?"); + regEx.append(".+"); // cluster name + regEx.append(FORWARD_SLASH); + regEx.append(".+"); // cluster type + regEx.append("\\)"); + regEx.append("( .*)?"); // Regex for user agent prefix + regEx.append("$"); + this.userAgentStringPattern = Pattern.compile(regEx.toString()); + } + + private String getUserAgentString(AbfsConfiguration config, + boolean includeSSLProvider) throws MalformedURLException { + AbfsClientContext abfsClientContext = new AbfsClientContextBuilder().build(); + AbfsClient client = new AbfsClient(new URL("https://azure.com"), null, + config, (AccessTokenProvider) null, abfsClientContext); String sslProviderName = null; if (includeSSLProvider) { - sslProviderName = DelegatingSSLSocketFactory.getDefaultFactory().getProviderName(); + sslProviderName = DelegatingSSLSocketFactory.getDefaultFactory() + .getProviderName(); } - String userAgent = client.initializeUserAgent(config, sslProviderName); - Pattern pattern = Pattern.compile(expectedPattern); - Assert.assertTrue("Incorrect User Agent String", - pattern.matcher(userAgent).matches()); + return client.initializeUserAgent(config, sslProviderName); } @Test - public void verifyUnknownUserAgent() throws Exception { - String clientVersion = "Azure Blob FS/" + VersionInfo.getVersion(); - String expectedUserAgentPattern = String.format(clientVersion - + " %s", "\\(JavaJRE ([^\\)]+)\\)"); + public void verifybBasicInfo() throws Exception { final Configuration configuration = new Configuration(); - configuration.unset(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY); - AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, accountName); - validateUserAgent(expectedUserAgentPattern, new URL("http://azure.com"), - abfsConfiguration, false); + configuration.addResource(TEST_CONFIGURATION_FILE_NAME); + AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + 
verifybBasicInfo(getUserAgentString(abfsConfiguration, false)); + } + + private void verifybBasicInfo(String userAgentStr) { + assertThat(userAgentStr) + .describedAs("User-Agent string [" + userAgentStr + + "] should be of the pattern: " + this.userAgentStringPattern.pattern()) + .matches(this.userAgentStringPattern) + .describedAs("User-Agent string should contain java vendor") + .contains(System.getProperty(JAVA_VENDOR) + .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)) + .describedAs("User-Agent string should contain java version") + .contains(System.getProperty(JAVA_VERSION)) + .describedAs("User-Agent string should contain OS name") + .contains(System.getProperty(OS_NAME) + .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING)) + .describedAs("User-Agent string should contain OS version") + .contains(System.getProperty(OS_VERSION)) + .describedAs("User-Agent string should contain OS arch") + .contains(System.getProperty(OS_ARCH)); } @Test - public void verifyUserAgent() throws Exception { - String clientVersion = "Azure Blob FS/" + VersionInfo.getVersion(); - String expectedUserAgentPattern = String.format(clientVersion - + " %s", "\\(JavaJRE ([^\\)]+)\\) Partner Service"); + public void verifyUserAgentPrefix() + throws IOException, IllegalAccessException { final Configuration configuration = new Configuration(); - configuration.set(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY, "Partner Service"); - AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, accountName); - validateUserAgent(expectedUserAgentPattern, new URL("http://azure.com"), - abfsConfiguration, false); + configuration.addResource(TEST_CONFIGURATION_FILE_NAME); + configuration.set(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY, FS_AZURE_USER_AGENT_PREFIX); + AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + String userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should contain " + FS_AZURE_USER_AGENT_PREFIX) + .contains(FS_AZURE_USER_AGENT_PREFIX); + + configuration.unset(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY); + abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should not contain " + FS_AZURE_USER_AGENT_PREFIX) + .doesNotContain(FS_AZURE_USER_AGENT_PREFIX); } @Test - public void verifyUserAgentWithSSLProvider() throws Exception { - String clientVersion = "Azure Blob FS/" + VersionInfo.getVersion(); - String expectedUserAgentPattern = String.format(clientVersion - + " %s", "\\(JavaJRE ([^\\)]+)\\) Partner Service"); + public void verifyUserAgentWithoutSSLProvider() throws Exception { final Configuration configuration = new Configuration(); - configuration.set(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY, "Partner Service"); + configuration.addResource(TEST_CONFIGURATION_FILE_NAME); configuration.set(ConfigurationKeys.FS_AZURE_SSL_CHANNEL_MODE_KEY, DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE.name()); - AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, accountName); - validateUserAgent(expectedUserAgentPattern, new URL("https://azure.com"), - abfsConfiguration, true); + AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + String userAgentStr = getUserAgentString(abfsConfiguration, 
true); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should contain sslProvider") + .contains(DelegatingSSLSocketFactory.getDefaultFactory().getProviderName()); + + userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should not contain sslProvider") + .doesNotContain(DelegatingSSLSocketFactory.getDefaultFactory().getProviderName()); + } + + @Test + public void verifyUserAgentClusterName() throws Exception { + final String clusterName = "testClusterName"; + final Configuration configuration = new Configuration(); + configuration.addResource(TEST_CONFIGURATION_FILE_NAME); + configuration.set(FS_AZURE_CLUSTER_NAME, clusterName); + AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + String userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should contain cluster name") + .contains(clusterName); + + configuration.unset(FS_AZURE_CLUSTER_NAME); + abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should not contain cluster name") + .doesNotContain(clusterName) + .describedAs("User-Agent string should contain UNKNOWN as cluster name config is absent") + .contains(DEFAULT_VALUE_UNKNOWN); + } + + @Test + public void verifyUserAgentClusterType() throws Exception { + final String clusterType = "testClusterType"; + final Configuration configuration = new Configuration(); + configuration.addResource(TEST_CONFIGURATION_FILE_NAME); + configuration.set(FS_AZURE_CLUSTER_TYPE, clusterType); + AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + String userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should contain cluster type") + .contains(clusterType); + + configuration.unset(FS_AZURE_CLUSTER_TYPE); + abfsConfiguration = new AbfsConfiguration(configuration, + ACCOUNT_NAME); + userAgentStr = getUserAgentString(abfsConfiguration, false); + + verifybBasicInfo(userAgentStr); + assertThat(userAgentStr) + .describedAs("User-Agent string should not contain cluster type") + .doesNotContain(clusterType) + .describedAs("User-Agent string should contain UNKNOWN as cluster type config is absent") + .contains(DEFAULT_VALUE_UNKNOWN); + } + + public static AbfsClient createTestClientFromCurrentContext( + AbfsClient baseAbfsClientInstance, + AbfsConfiguration abfsConfig) + throws AzureBlobFileSystemException { + AuthType currentAuthType = abfsConfig.getAuthType( + abfsConfig.getAccountName()); + + AbfsPerfTracker tracker = new AbfsPerfTracker("test", + abfsConfig.getAccountName(), + abfsConfig); + + AbfsClientContext abfsClientContext = + new AbfsClientContextBuilder().withAbfsPerfTracker(tracker) + .withExponentialRetryPolicy( + new ExponentialRetryPolicy(abfsConfig.getMaxIoRetries())) + .build(); + + // Create test AbfsClient + AbfsClient testClient = new AbfsClient( + baseAbfsClientInstance.getBaseUrl(), + (currentAuthType == AuthType.SharedKey + ? 
new SharedKeyCredentials( + abfsConfig.getAccountName().substring(0, + abfsConfig.getAccountName().indexOf(DOT)), + abfsConfig.getStorageAccountKey()) + : null), + abfsConfig, + (currentAuthType == AuthType.OAuth + ? abfsConfig.getTokenProvider() + : null), + abfsClientContext); + + return testClient; + } + + public static AbfsClient getMockAbfsClient(AbfsClient baseAbfsClientInstance, + AbfsConfiguration abfsConfig) throws Exception { + AuthType currentAuthType = abfsConfig.getAuthType( + abfsConfig.getAccountName()); + + org.junit.Assume.assumeTrue( + (currentAuthType == AuthType.SharedKey) + || (currentAuthType == AuthType.OAuth)); + + AbfsClient client = mock(AbfsClient.class); + AbfsPerfTracker tracker = new AbfsPerfTracker( + "test", + abfsConfig.getAccountName(), + abfsConfig); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.getAuthType()).thenReturn(currentAuthType); + when(client.getRetryPolicy()).thenReturn( + new ExponentialRetryPolicy(1)); + + when(client.createDefaultUriQueryBuilder()).thenCallRealMethod(); + when(client.createRequestUrl(any(), any())).thenCallRealMethod(); + when(client.getAccessToken()).thenCallRealMethod(); + when(client.getSharedKeyCredentials()).thenCallRealMethod(); + when(client.createDefaultHeaders()).thenCallRealMethod(); + + // override baseurl + client = TestAbfsClient.setAbfsClientField(client, "abfsConfiguration", + abfsConfig); + + // override baseurl + client = TestAbfsClient.setAbfsClientField(client, "baseUrl", + baseAbfsClientInstance.getBaseUrl()); + + // override auth provider + if (currentAuthType == AuthType.SharedKey) { + client = TestAbfsClient.setAbfsClientField(client, "sharedKeyCredentials", + new SharedKeyCredentials( + abfsConfig.getAccountName().substring(0, + abfsConfig.getAccountName().indexOf(DOT)), + abfsConfig.getStorageAccountKey())); + } else { + client = TestAbfsClient.setAbfsClientField(client, "tokenProvider", + abfsConfig.getTokenProvider()); + } + + // override user agent + String userAgent = "APN/1.0 Azure Blob FS/3.4.0-SNAPSHOT (PrivateBuild " + + "JavaJRE 1.8.0_252; Linux 5.3.0-59-generic/amd64; openssl-1.0; " + + "UNKNOWN/UNKNOWN) MSFT"; + client = TestAbfsClient.setAbfsClientField(client, "userAgent", userAgent); + + return client; + } + + private static AbfsClient setAbfsClientField( + final AbfsClient client, + final String fieldName, + Object fieldObject) throws Exception { + + Field field = AbfsClient.class.getDeclaredField(fieldName); + field.setAccessible(true); + Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, + field.getModifiers() & ~java.lang.reflect.Modifier.FINAL); + field.set(client, fieldObject); + return client; + } + + /** + * Test helper method to access private createRequestUrl method. + * @param client test AbfsClient instace + * @param path path to generate Url + * @return return store path url + * @throws AzureBlobFileSystemException + */ + public static URL getTestUrl(AbfsClient client, String path) throws + AzureBlobFileSystemException { + final AbfsUriQueryBuilder abfsUriQueryBuilder + = client.createDefaultUriQueryBuilder(); + return client.createRequestUrl(path, abfsUriQueryBuilder.toString()); + } + + /** + * Test helper method to access private createDefaultHeaders method. 
+ * @param client test AbfsClient instance + * @return List of AbfsHttpHeaders + */ + public static List getTestRequestHeaders(AbfsClient client) { + return client.createDefaultHeaders(); + } + + /** + * Test helper method to create an AbfsRestOperation instance. + * @param type RestOpType + * @param client AbfsClient + * @param method HttpMethod + * @param url Test path url + * @param requestHeaders request headers + * @return instance of AbfsRestOperation + */ + public static AbfsRestOperation getRestOp(AbfsRestOperationType type, + AbfsClient client, + String method, + URL url, + List requestHeaders) { + return new AbfsRestOperation( + type, + client, + method, + url, + requestHeaders); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsHttpOperation.java new file mode 100644 index 0000000000000..bb7059a7a5032 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsHttpOperation.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.UnsupportedEncodingException; +import java.net.MalformedURLException; +import java.net.URLEncoder; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +public class TestAbfsHttpOperation { + + @Test + public void testMaskingAndEncoding() + throws MalformedURLException, UnsupportedEncodingException { + testIfMaskAndEncodeSuccessful("Where sig is the only query param", + "http://www.testurl.net?sig=abcd", "http://www.testurl.net?sig=XXXX"); + + testIfMaskAndEncodeSuccessful("Where sig is the first query param", + "http://www.testurl.net?sig=abcd&abc=xyz", + "http://www.testurl.net?sig=XXXX&abc=xyz"); + + testIfMaskAndEncodeSuccessful( + "Where sig is neither first nor last query param", + "http://www.testurl.net?lmn=abc&sig=abcd&abc=xyz", + "http://www.testurl.net?lmn=abc&sig=XXXX&abc=xyz"); + + testIfMaskAndEncodeSuccessful("Where sig is the last query param", + "http://www.testurl.net?abc=xyz&sig=abcd", + "http://www.testurl.net?abc=xyz&sig=XXXX"); + + testIfMaskAndEncodeSuccessful("Where sig query param is not present", + "http://www.testurl.net?abc=xyz", "http://www.testurl.net?abc=xyz"); + + testIfMaskAndEncodeSuccessful( + "Where sig query param is not present but mysig", + "http://www.testurl.net?abc=xyz&mysig=qwerty", + "http://www.testurl.net?abc=xyz&mysig=qwerty"); + + testIfMaskAndEncodeSuccessful( + "Where sig query param is not present but sigmy", + "http://www.testurl.net?abc=xyz&sigmy=qwerty", + "http://www.testurl.net?abc=xyz&sigmy=qwerty"); + + testIfMaskAndEncodeSuccessful( + "Where sig query param is not present but a " + "value sig", + "http://www.testurl.net?abc=xyz&mnop=sig", + "http://www.testurl.net?abc=xyz&mnop=sig"); + + testIfMaskAndEncodeSuccessful( + "Where sig query param is not present but a " + "value ends with sig", + "http://www.testurl.net?abc=xyz&mnop=abcsig", + "http://www.testurl.net?abc=xyz&mnop=abcsig"); + + testIfMaskAndEncodeSuccessful( + "Where sig query param is not present but a " + "value starts with sig", + "http://www.testurl.net?abc=xyz&mnop=sigabc", + "http://www.testurl.net?abc=xyz&mnop=sigabc"); + } + + private void testIfMaskAndEncodeSuccessful(final String scenario, + final String url, final String expectedMaskedUrl) + throws UnsupportedEncodingException { + + Assertions.assertThat(AbfsHttpOperation.getSignatureMaskedUrl(url)) + .describedAs(url + " (" + scenario + ") after masking should be: " + + expectedMaskedUrl).isEqualTo(expectedMaskedUrl); + + final String expectedMaskedEncodedUrl = URLEncoder + .encode(expectedMaskedUrl, "UTF-8"); + Assertions.assertThat(AbfsHttpOperation.encodedUrlStr(expectedMaskedUrl)) + .describedAs( + url + " (" + scenario + ") after masking and encoding should " + + "be: " + expectedMaskedEncodedUrl) + .isEqualTo(expectedMaskedEncodedUrl); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java new file mode 100644 index 0000000000000..cbf3d6a2a68ee --- /dev/null +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -0,0 +1,720 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; + +import org.junit.Assert; +import org.junit.Test; +import java.util.Arrays; + +import org.assertj.core.api.Assertions; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; +import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; +import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TimeoutException; +import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; +import org.apache.hadoop.fs.azurebfs.utils.TestCachedSASToken; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.FORWARD_SLASH; + +/** + * Unit test AbfsInputStream. 
+ */ +public class TestAbfsInputStream extends + AbstractAbfsIntegrationTest { + + private static final int ONE_KB = 1 * 1024; + private static final int TWO_KB = 2 * 1024; + private static final int THREE_KB = 3 * 1024; + private static final int SIXTEEN_KB = 16 * ONE_KB; + private static final int FORTY_EIGHT_KB = 48 * ONE_KB; + private static final int ONE_MB = 1 * 1024 * 1024; + private static final int FOUR_MB = 4 * ONE_MB; + private static final int EIGHT_MB = 8 * ONE_MB; + private static final int TEST_READAHEAD_DEPTH_2 = 2; + private static final int TEST_READAHEAD_DEPTH_4 = 4; + private static final int REDUCED_READ_BUFFER_AGE_THRESHOLD = 3000; // 3 sec + private static final int INCREASED_READ_BUFFER_AGE_THRESHOLD = + REDUCED_READ_BUFFER_AGE_THRESHOLD * 10; // 30 sec + private static final int ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE = 16 * ONE_MB; + + private AbfsRestOperation getMockRestOp() { + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsHttpOperation httpOp = mock(AbfsHttpOperation.class); + when(httpOp.getBytesReceived()).thenReturn(1024L); + when(op.getResult()).thenReturn(httpOp); + when(op.getSasToken()).thenReturn(TestCachedSASToken.getTestCachedSASTokenInstance().get()); + return op; + } + + private AbfsClient getMockAbfsClient() { + // Mock failure for client.read() + AbfsClient client = mock(AbfsClient.class); + AbfsPerfTracker tracker = new AbfsPerfTracker( + "test", + this.getAccountName(), + this.getConfiguration()); + when(client.getAbfsPerfTracker()).thenReturn(tracker); + + return client; + } + + private AbfsInputStream getAbfsInputStream(AbfsClient mockAbfsClient, String fileName) { + AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1); + // Create AbfsInputStream with the client instance + AbfsInputStream inputStream = new AbfsInputStream( + mockAbfsClient, + null, + FORWARD_SLASH + fileName, + THREE_KB, + inputStreamContext.withReadBufferSize(ONE_KB).withReadAheadQueueDepth(10).withReadAheadBlockSize(ONE_KB), + "eTag"); + + inputStream.setCachedSasToken( + TestCachedSASToken.getTestCachedSASTokenInstance()); + + return inputStream; + } + + public AbfsInputStream getAbfsInputStream(AbfsClient abfsClient, + String fileName, + int fileSize, + String eTag, + int readAheadQueueDepth, + int readBufferSize, + boolean alwaysReadBufferSize, + int readAheadBlockSize) { + AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1); + // Create AbfsInputStream with the client instance + AbfsInputStream inputStream = new AbfsInputStream( + abfsClient, + null, + FORWARD_SLASH + fileName, + fileSize, + inputStreamContext.withReadBufferSize(readBufferSize) + .withReadAheadQueueDepth(readAheadQueueDepth) + .withShouldReadBufferSizeAlways(alwaysReadBufferSize) + .withReadAheadBlockSize(readAheadBlockSize), + eTag); + + inputStream.setCachedSasToken( + TestCachedSASToken.getTestCachedSASTokenInstance()); + + return inputStream; + } + + private void queueReadAheads(AbfsInputStream inputStream) { + // Mimic AbfsInputStream readAhead queue requests + ReadBufferManager.getBufferManager() + .queueReadAhead(inputStream, 0, ONE_KB); + ReadBufferManager.getBufferManager() + .queueReadAhead(inputStream, ONE_KB, ONE_KB); + ReadBufferManager.getBufferManager() + .queueReadAhead(inputStream, TWO_KB, TWO_KB); + } + + private void verifyReadCallCount(AbfsClient client, int count) throws + AzureBlobFileSystemException, InterruptedException { + // ReadAhead threads are triggered asynchronously. 
+ // Wait a second before verifying the number of total calls. + Thread.sleep(1000); + verify(client, times(count)).read(any(String.class), any(Long.class), + any(byte[].class), any(Integer.class), any(Integer.class), + any(String.class), any(String.class)); + } + + private void checkEvictedStatus(AbfsInputStream inputStream, int position, boolean expectedToThrowException) + throws Exception { + // Sleep for the eviction threshold time + Thread.sleep(ReadBufferManager.getBufferManager().getThresholdAgeMilliseconds() + 1000); + + // Eviction is done only when AbfsInputStream tries to queue new items. + // 1 tryEvict will remove 1 eligible item. To ensure that the current test buffer + // will get evicted (considering there could be other tests running in parallel), + // call tryEvict for the number of items that are there in completedReadList. + int numOfCompletedReadListItems = ReadBufferManager.getBufferManager().getCompletedReadListSize(); + while (numOfCompletedReadListItems > 0) { + ReadBufferManager.getBufferManager().callTryEvict(); + numOfCompletedReadListItems--; + } + + if (expectedToThrowException) { + intercept(IOException.class, + () -> inputStream.read(position, new byte[ONE_KB], 0, ONE_KB)); + } else { + inputStream.read(position, new byte[ONE_KB], 0, ONE_KB); + } + } + + public TestAbfsInputStream() throws Exception { + super(); + // Reduce thresholdAgeMilliseconds to 3 sec for the tests + ReadBufferManager.getBufferManager().setThresholdAgeMilliseconds(REDUCED_READ_BUFFER_AGE_THRESHOLD); + } + + /** + * This test expects AbfsInputStream to throw the exception that readAhead + * thread received on read. The readAhead thread must be initiated from the + * active read request itself. + * Also checks that the ReadBuffers are evicted as per the ReadBufferManager + * threshold criteria. + * @throws Exception + */ + @Test + public void testFailedReadAhead() throws Exception { + AbfsClient client = getMockAbfsClient(); + AbfsRestOperation successOp = getMockRestOp(); + + // Stub : + // Read request leads to 3 readahead calls: Fail all 3 readahead-client.read() + // Actual read request fails with the failure in readahead thread + doThrow(new TimeoutException("Internal Server error for RAH-Thread-X")) + .doThrow(new TimeoutException("Internal Server error for RAH-Thread-Y")) + .doThrow(new TimeoutException("Internal Server error RAH-Thread-Z")) + .doReturn(successOp) // Any extra calls to read, pass it. + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testFailedReadAhead.txt"); + + // Scenario: ReadAhead triggered from current active read call failed + // Before the change to return exception from readahead buffer, + // AbfsInputStream would have triggered an extra readremote on noticing + // data absent in readahead buffers + // In this test, a read should trigger 3 client.read() calls as file is 3 KB + // and readahead buffer size set in AbfsInputStream is 1 KB + // There should only be a total of 3 client.read() in this test. + intercept(IOException.class, + () -> inputStream.read(new byte[ONE_KB])); + + // Only the 3 readAhead threads should have triggered client.read + verifyReadCallCount(client, 3); + + // Stub returns success for the 4th read request, if ReadBuffers still + // persisted, ReadAheadManager getBlock would have returned exception. 
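The stub set up above leans on Mockito consuming chained answers one invocation at a time: the three doThrow entries cover the three read-ahead calls, and the trailing doReturn then answers any later read, which is why the fourth request can succeed. A standalone illustration of that ordering (the list mock assumes java.util.List and is unrelated to ABFS):

    List<String> answers = mock(List.class);
    doThrow(new IllegalStateException("call 1 fails"))
        .doThrow(new IllegalStateException("call 2 fails"))
        .doReturn("ok")            // call 3 and every call after it succeed
        .when(answers).get(0);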
+ checkEvictedStatus(inputStream, 0, false); + } + + @Test + public void testFailedReadAheadEviction() throws Exception { + AbfsClient client = getMockAbfsClient(); + AbfsRestOperation successOp = getMockRestOp(); + ReadBufferManager.setThresholdAgeMilliseconds(INCREASED_READ_BUFFER_AGE_THRESHOLD); + // Stub : + // Read request leads to 3 readahead calls: Fail all 3 readahead-client.read() + // Actual read request fails with the failure in readahead thread + doThrow(new TimeoutException("Internal Server error")) + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testFailedReadAheadEviction.txt"); + + // Add a failed buffer to completed queue and set to no free buffers to read ahead. + ReadBuffer buff = new ReadBuffer(); + buff.setStatus(ReadBufferStatus.READ_FAILED); + ReadBufferManager.getBufferManager().testMimicFullUseAndAddFailedBuffer(buff); + + // if read failed buffer eviction is tagged as a valid eviction, it will lead to + // wrong assumption of queue logic that a buffer is freed up and can lead to : + // java.util.EmptyStackException + // at java.util.Stack.peek(Stack.java:102) + // at java.util.Stack.pop(Stack.java:84) + // at org.apache.hadoop.fs.azurebfs.services.ReadBufferManager.queueReadAhead + ReadBufferManager.getBufferManager().queueReadAhead(inputStream, 0, ONE_KB); + } + + /** + * + * The test expects AbfsInputStream to initiate a remote read request for + * the request offset and length when previous read ahead on the offset had failed. + * Also checks that the ReadBuffers are evicted as per the ReadBufferManager + * threshold criteria. + * @throws Exception + */ + @Test + public void testOlderReadAheadFailure() throws Exception { + AbfsClient client = getMockAbfsClient(); + AbfsRestOperation successOp = getMockRestOp(); + + // Stub : + // First Read request leads to 3 readahead calls: Fail all 3 readahead-client.read() + // A second read request will see that readahead had failed for data in + // the requested offset range and also that its is an older readahead request. + // So attempt a new read only for the requested range. + doThrow(new TimeoutException("Internal Server error for RAH-X")) + .doThrow(new TimeoutException("Internal Server error for RAH-Y")) + .doThrow(new TimeoutException("Internal Server error for RAH-Z")) + .doReturn(successOp) // pass the read for second read request + .doReturn(successOp) // pass success for post eviction test + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testOlderReadAheadFailure.txt"); + + // First read request that fails as the readahead triggered from this request failed. + intercept(IOException.class, + () -> inputStream.read(new byte[ONE_KB])); + + // Only the 3 readAhead threads should have triggered client.read + verifyReadCallCount(client, 3); + + // Sleep for thresholdAgeMs so that the read ahead buffer qualifies for being old. + Thread.sleep(ReadBufferManager.getBufferManager().getThresholdAgeMilliseconds()); + + // Second read request should retry the read (and not issue any new readaheads) + inputStream.read(ONE_KB, new byte[ONE_KB], 0, ONE_KB); + + // Once created, mock will remember all interactions. 
So total number of read + // calls will be one more from earlier (there is a reset mock which will reset the + // count, but the mock stub is erased as well which needs AbsInputStream to be recreated, + // which beats the purpose) + verifyReadCallCount(client, 4); + + // Stub returns success for the 5th read request, if ReadBuffers still + // persisted request would have failed for position 0. + checkEvictedStatus(inputStream, 0, false); + } + + /** + * The test expects AbfsInputStream to utilize any data read ahead for + * requested offset and length. + * @throws Exception + */ + @Test + public void testSuccessfulReadAhead() throws Exception { + // Mock failure for client.read() + AbfsClient client = getMockAbfsClient(); + + // Success operation mock + AbfsRestOperation op = getMockRestOp(); + + // Stub : + // Pass all readAheads and fail the post eviction request to + // prove ReadAhead buffer is used + // for post eviction check, fail all read aheads + doReturn(op) + .doReturn(op) + .doReturn(op) + .doThrow(new TimeoutException("Internal Server error for RAH-X")) + .doThrow(new TimeoutException("Internal Server error for RAH-Y")) + .doThrow(new TimeoutException("Internal Server error for RAH-Z")) + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt"); + int beforeReadCompletedListSize = ReadBufferManager.getBufferManager().getCompletedReadListSize(); + + // First read request that triggers readAheads. + inputStream.read(new byte[ONE_KB]); + + // Only the 3 readAhead threads should have triggered client.read + verifyReadCallCount(client, 3); + int newAdditionsToCompletedRead = + ReadBufferManager.getBufferManager().getCompletedReadListSize() + - beforeReadCompletedListSize; + // read buffer might be dumped if the ReadBufferManager getblock preceded + // the action of buffer being picked for reading from readaheadqueue, so that + // inputstream can proceed with read and not be blocked on readahead thread + // availability. So the count of buffers in completedReadQueue for the stream + // can be same or lesser than the requests triggered to queue readahead. + Assertions.assertThat(newAdditionsToCompletedRead) + .describedAs( + "New additions to completed reads should be same or less than as number of readaheads") + .isLessThanOrEqualTo(3); + + // Another read request whose requested data is already read ahead. + inputStream.read(ONE_KB, new byte[ONE_KB], 0, ONE_KB); + + // Once created, mock will remember all interactions. + // As the above read should not have triggered any server calls, total + // number of read calls made at this point will be same as last. + verifyReadCallCount(client, 3); + + // Stub will throw exception for client.read() for 4th and later calls + // if not using the read-ahead buffer exception will be thrown on read + checkEvictedStatus(inputStream, 0, true); + } + + /** + * This test expects ReadAheadManager to throw exception if the read ahead + * thread had failed within the last thresholdAgeMilliseconds. + * Also checks that the ReadBuffers are evicted as per the ReadBufferManager + * threshold criteria. 
+ * @throws Exception + */ + @Test + public void testReadAheadManagerForFailedReadAhead() throws Exception { + AbfsClient client = getMockAbfsClient(); + AbfsRestOperation successOp = getMockRestOp(); + + // Stub : + // Read request leads to 3 readahead calls: Fail all 3 readahead-client.read() + // Actual read request fails with the failure in readahead thread + doThrow(new TimeoutException("Internal Server error for RAH-Thread-X")) + .doThrow(new TimeoutException("Internal Server error for RAH-Thread-Y")) + .doThrow(new TimeoutException("Internal Server error RAH-Thread-Z")) + .doReturn(successOp) // Any extra calls to read, pass it. + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testReadAheadManagerForFailedReadAhead.txt"); + + queueReadAheads(inputStream); + + // AbfsInputStream Read would have waited for the read-ahead for the requested offset + // as we are testing from ReadAheadManager directly, sleep for a sec to + // get the read ahead threads to complete + Thread.sleep(1000); + + // if readAhead failed for specific offset, getBlock should + // throw exception from the ReadBuffer that failed within last thresholdAgeMilliseconds sec + intercept(IOException.class, + () -> ReadBufferManager.getBufferManager().getBlock( + inputStream, + 0, + ONE_KB, + new byte[ONE_KB])); + + // Only the 3 readAhead threads should have triggered client.read + verifyReadCallCount(client, 3); + + // Stub returns success for the 4th read request, if ReadBuffers still + // persisted, ReadAheadManager getBlock would have returned exception. + checkEvictedStatus(inputStream, 0, false); + } + + /** + * The test expects ReadAheadManager to return 0 receivedBytes when previous + * read ahead on the offset had failed and not throw exception received then. + * Also checks that the ReadBuffers are evicted as per the ReadBufferManager + * threshold criteria. + * @throws Exception + */ + @Test + public void testReadAheadManagerForOlderReadAheadFailure() throws Exception { + AbfsClient client = getMockAbfsClient(); + AbfsRestOperation successOp = getMockRestOp(); + + // Stub : + // First Read request leads to 3 readahead calls: Fail all 3 readahead-client.read() + // A second read request will see that readahead had failed for data in + // the requested offset range but also that its is an older readahead request. + // System issue could have resolved by now, so attempt a new read only for the requested range. 
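In other words, an old failed read-ahead must not poison later reads: getBlock reports zero received bytes for it, and the stream falls back to a plain remote read of just the requested range. A hedged sketch of that caller-side shape (placeholder variables, assumed to sit in a method that declares IOException; not the real AbfsInputStream code):

    int receivedBytes = ReadBufferManager.getBufferManager()
        .getBlock(stream, position, length, buffer);
    if (receivedBytes == 0) {
      // Nothing usable was read ahead (for example an old failed buffer):
      // issue a fresh remote read covering exactly [position, position + length).
    }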
+ doThrow(new TimeoutException("Internal Server error for RAH-X")) + .doThrow(new TimeoutException("Internal Server error for RAH-X")) + .doThrow(new TimeoutException("Internal Server error for RAH-X")) + .doReturn(successOp) // pass the read for second read request + .doReturn(successOp) // pass success for post eviction test + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testReadAheadManagerForOlderReadAheadFailure.txt"); + + queueReadAheads(inputStream); + + // AbfsInputStream Read would have waited for the read-ahead for the requested offset + // as we are testing from ReadAheadManager directly, sleep for thresholdAgeMilliseconds so that + // read buffer qualifies for to be an old buffer + Thread.sleep(ReadBufferManager.getBufferManager().getThresholdAgeMilliseconds()); + + // Only the 3 readAhead threads should have triggered client.read + verifyReadCallCount(client, 3); + + // getBlock from a new read request should return 0 if there is a failure + // 30 sec before in read ahead buffer for respective offset. + int bytesRead = ReadBufferManager.getBufferManager().getBlock( + inputStream, + ONE_KB, + ONE_KB, + new byte[ONE_KB]); + Assert.assertEquals("bytesRead should be zero when previously read " + + "ahead buffer had failed", 0, bytesRead); + + // Stub returns success for the 5th read request, if ReadBuffers still + // persisted request would have failed for position 0. + checkEvictedStatus(inputStream, 0, false); + } + + /** + * The test expects ReadAheadManager to return data from previously read + * ahead data of same offset. + * @throws Exception + */ + @Test + public void testReadAheadManagerForSuccessfulReadAhead() throws Exception { + // Mock failure for client.read() + AbfsClient client = getMockAbfsClient(); + + // Success operation mock + AbfsRestOperation op = getMockRestOp(); + + // Stub : + // Pass all readAheads and fail the post eviction request to + // prove ReadAhead buffer is used + doReturn(op) + .doReturn(op) + .doReturn(op) + .doThrow(new TimeoutException("Internal Server error for RAH-X")) // for post eviction request + .doThrow(new TimeoutException("Internal Server error for RAH-Y")) + .doThrow(new TimeoutException("Internal Server error for RAH-Z")) + .when(client) + .read(any(String.class), any(Long.class), any(byte[].class), + any(Integer.class), any(Integer.class), any(String.class), + any(String.class)); + + AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt"); + + queueReadAheads(inputStream); + + // AbfsInputStream Read would have waited for the read-ahead for the requested offset + // as we are testing from ReadAheadManager directly, sleep for a sec to + // get the read ahead threads to complete + Thread.sleep(1000); + + // Only the 3 readAhead threads should have triggered client.read + verifyReadCallCount(client, 3); + + // getBlock for a new read should return the buffer read-ahead + int bytesRead = ReadBufferManager.getBufferManager().getBlock( + inputStream, + ONE_KB, + ONE_KB, + new byte[ONE_KB]); + + Assert.assertTrue("bytesRead should be non-zero from the " + + "buffer that was read-ahead", bytesRead > 0); + + // Once created, mock will remember all interactions. + // As the above read should not have triggered any server calls, total + // number of read calls made at this point will be same as last. 
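The expected count stays at 3 because Mockito verification totals are cumulative over the mock's lifetime rather than reset after each verify call, which is what the "mock will remember all interactions" remarks in these tests refer to. A tiny throwaway example, unrelated to ABFS:

    Runnable task = mock(Runnable.class);
    task.run();
    verify(task, times(1)).run();
    task.run();
    verify(task, times(2)).run();   // counts every invocation so far, not just new ones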
+ verifyReadCallCount(client, 3); + + // Stub will throw exception for client.read() for 4th and later calls + // if not using the read-ahead buffer exception will be thrown on read + checkEvictedStatus(inputStream, 0, true); + } + + /** + * Test readahead with different config settings for request request size and + * readAhead block size + * @throws Exception + */ + @Test + public void testDiffReadRequestSizeAndRAHBlockSize() throws Exception { + // Set requestRequestSize = 4MB and readAheadBufferSize=8MB + resetReadBufferManager(FOUR_MB, INCREASED_READ_BUFFER_AGE_THRESHOLD); + testReadAheadConfigs(FOUR_MB, TEST_READAHEAD_DEPTH_4, false, EIGHT_MB); + + // Test for requestRequestSize =16KB and readAheadBufferSize=16KB + resetReadBufferManager(SIXTEEN_KB, INCREASED_READ_BUFFER_AGE_THRESHOLD); + AbfsInputStream inputStream = testReadAheadConfigs(SIXTEEN_KB, + TEST_READAHEAD_DEPTH_2, true, SIXTEEN_KB); + testReadAheads(inputStream, SIXTEEN_KB, SIXTEEN_KB); + + // Test for requestRequestSize =16KB and readAheadBufferSize=48KB + resetReadBufferManager(FORTY_EIGHT_KB, INCREASED_READ_BUFFER_AGE_THRESHOLD); + inputStream = testReadAheadConfigs(SIXTEEN_KB, TEST_READAHEAD_DEPTH_2, true, + FORTY_EIGHT_KB); + testReadAheads(inputStream, SIXTEEN_KB, FORTY_EIGHT_KB); + + // Test for requestRequestSize =48KB and readAheadBufferSize=16KB + resetReadBufferManager(FORTY_EIGHT_KB, INCREASED_READ_BUFFER_AGE_THRESHOLD); + inputStream = testReadAheadConfigs(FORTY_EIGHT_KB, TEST_READAHEAD_DEPTH_2, + true, + SIXTEEN_KB); + testReadAheads(inputStream, FORTY_EIGHT_KB, SIXTEEN_KB); + } + + + private void testReadAheads(AbfsInputStream inputStream, + int readRequestSize, + int readAheadRequestSize) + throws Exception { + if (readRequestSize > readAheadRequestSize) { + readAheadRequestSize = readRequestSize; + } + + byte[] firstReadBuffer = new byte[readRequestSize]; + byte[] secondReadBuffer = new byte[readAheadRequestSize]; + + // get the expected bytes to compare + byte[] expectedFirstReadAheadBufferContents = new byte[readRequestSize]; + byte[] expectedSecondReadAheadBufferContents = new byte[readAheadRequestSize]; + getExpectedBufferData(0, readRequestSize, expectedFirstReadAheadBufferContents); + getExpectedBufferData(readRequestSize, readAheadRequestSize, + expectedSecondReadAheadBufferContents); + + Assertions.assertThat(inputStream.read(firstReadBuffer, 0, readRequestSize)) + .describedAs("Read should be of exact requested size") + .isEqualTo(readRequestSize); + + assertTrue("Data mismatch found in RAH1", + Arrays.equals(firstReadBuffer, + expectedFirstReadAheadBufferContents)); + + Assertions.assertThat(inputStream.read(secondReadBuffer, 0, readAheadRequestSize)) + .describedAs("Read should be of exact requested size") + .isEqualTo(readAheadRequestSize); + + assertTrue("Data mismatch found in RAH2", + Arrays.equals(secondReadBuffer, + expectedSecondReadAheadBufferContents)); + } + + public AbfsInputStream testReadAheadConfigs(int readRequestSize, + int readAheadQueueDepth, + boolean alwaysReadBufferSizeEnabled, + int readAheadBlockSize) throws Exception { + Configuration + config = new Configuration( + this.getRawConfiguration()); + config.set("fs.azure.read.request.size", Integer.toString(readRequestSize)); + config.set("fs.azure.readaheadqueue.depth", + Integer.toString(readAheadQueueDepth)); + config.set("fs.azure.read.alwaysReadBufferSize", + Boolean.toString(alwaysReadBufferSizeEnabled)); + config.set("fs.azure.read.readahead.blocksize", + Integer.toString(readAheadBlockSize)); + if (readRequestSize > 
readAheadBlockSize) { + readAheadBlockSize = readRequestSize; + } + + Path testPath = new Path( + "/testReadAheadConfigs"); + final AzureBlobFileSystem fs = createTestFile(testPath, + ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE, config); + byte[] byteBuffer = new byte[ONE_MB]; + AbfsInputStream inputStream = this.getAbfsStore(fs) + .openFileForRead(testPath, null); + + Assertions.assertThat(inputStream.getBufferSize()) + .describedAs("Unexpected AbfsInputStream buffer size") + .isEqualTo(readRequestSize); + + Assertions.assertThat(inputStream.getReadAheadQueueDepth()) + .describedAs("Unexpected ReadAhead queue depth") + .isEqualTo(readAheadQueueDepth); + + Assertions.assertThat(inputStream.shouldAlwaysReadBufferSize()) + .describedAs("Unexpected AlwaysReadBufferSize settings") + .isEqualTo(alwaysReadBufferSizeEnabled); + + Assertions.assertThat(ReadBufferManager.getBufferManager().getReadAheadBlockSize()) + .describedAs("Unexpected readAhead block size") + .isEqualTo(readAheadBlockSize); + + return inputStream; + } + + private void getExpectedBufferData(int offset, int length, byte[] b) { + boolean startFillingIn = false; + int indexIntoBuffer = 0; + char character = 'a'; + + for (int i = 0; i < (offset + length); i++) { + if (i == offset) { + startFillingIn = true; + } + + if ((startFillingIn) && (indexIntoBuffer < length)) { + b[indexIntoBuffer] = (byte) character; + indexIntoBuffer++; + } + + character = (character == 'z') ? 'a' : (char) ((int) character + 1); + } + } + + private AzureBlobFileSystem createTestFile(Path testFilePath, long testFileSize, + Configuration config) throws Exception { + AzureBlobFileSystem fs; + + if (config == null) { + fs = this.getFileSystem(); + } else { + final AzureBlobFileSystem currentFs = getFileSystem(); + fs = (AzureBlobFileSystem) FileSystem.newInstance(currentFs.getUri(), + config); + } + + if (fs.exists(testFilePath)) { + FileStatus status = fs.getFileStatus(testFilePath); + if (status.getLen() >= testFileSize) { + return fs; + } + } + + byte[] buffer = new byte[EIGHT_MB]; + char character = 'a'; + for (int i = 0; i < buffer.length; i++) { + buffer[i] = (byte) character; + character = (character == 'z') ? 'a' : (char) ((int) character + 1); + } + + try (FSDataOutputStream outputStream = fs.create(testFilePath)) { + int bytesWritten = 0; + while (bytesWritten < testFileSize) { + outputStream.write(buffer); + bytesWritten += buffer.length; + } + } + + Assertions.assertThat(fs.getFileStatus(testFilePath).getLen()) + .describedAs("File not created of expected size") + .isEqualTo(testFileSize); + + return fs; + } + + private void resetReadBufferManager(int bufferSize, int threshold) { + ReadBufferManager.getBufferManager() + .testResetReadBufferManager(bufferSize, threshold); + // Trigger GC as aggressive recreation of ReadBufferManager buffers + // by successive tests can lead to OOM based on the dev VM/machine capacity. + System.gc(); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java new file mode 100644 index 0000000000000..1e6b8efe6d9d2 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsOutputStream.java @@ -0,0 +1,418 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Random; + +import org.junit.Test; + +import org.mockito.ArgumentCaptor; + +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters; +import org.apache.hadoop.conf.Configuration; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.refEq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.anyLong; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE; + +public final class TestAbfsOutputStream { + + private static final int BUFFER_SIZE = 4096; + private static final int WRITE_SIZE = 1000; + private static final String PATH = "~/testpath"; + private final String globalKey = "fs.azure.configuration"; + private final String accountName1 = "account1"; + private final String accountKey1 = globalKey + "." 
+ accountName1; + private final String accountValue1 = "one"; + + private AbfsOutputStreamContext populateAbfsOutputStreamContext(int writeBufferSize, + boolean isFlushEnabled, + boolean disableOutputStreamFlush, + boolean isAppendBlob) throws IOException, IllegalAccessException { + AbfsConfiguration abfsConf = new AbfsConfiguration(new Configuration(), + accountName1); + return new AbfsOutputStreamContext(2) + .withWriteBufferSize(writeBufferSize) + .enableFlush(isFlushEnabled) + .disableOutputStreamFlush(disableOutputStreamFlush) + .withStreamStatistics(new AbfsOutputStreamStatisticsImpl()) + .withAppendBlob(isAppendBlob) + .withWriteMaxConcurrentRequestCount(abfsConf.getWriteMaxConcurrentRequestCount()) + .withMaxWriteRequestsToQueue(abfsConf.getMaxWriteRequestsToQueue()) + .build(); + } + + /** + * The test verifies OutputStream shortwrite case(2000bytes write followed by flush, hflush, hsync) is making correct HTTP calls to the server + */ + @Test + public void verifyShortWriteRequest() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[WRITE_SIZE]; + new Random().nextBytes(b); + out.write(b); + out.hsync(); + + final byte[] b1 = new byte[2*WRITE_SIZE]; + new Random().nextBytes(b1); + out.write(b1); + out.flush(); + out.hflush(); + + out.hsync(); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, WRITE_SIZE, APPEND_MODE, false); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + WRITE_SIZE, 0, 2 * WRITE_SIZE, APPEND_MODE, false); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + } + + /** + * The test verifies OutputStream Write of WRITE_SIZE(1000 bytes) followed by a close is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequest() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + 
populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[WRITE_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 5; i++) { + out.write(b); + } + out.close(); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, BUFFER_SIZE, APPEND_MODE, false); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + BUFFER_SIZE, 0, 5*WRITE_SIZE-BUFFER_SIZE, APPEND_MODE, false); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + + ArgumentCaptor acFlushPath = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acFlushPosition = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushClose = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), + acFlushSASToken.capture()); + assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(5*WRITE_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); + assertThat(Arrays.asList(true)).describedAs("Close flag").isEqualTo(acFlushClose.getAllValues()); + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) followed by a close is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeAndClose() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsHttpOperation httpOp = mock(AbfsHttpOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(op.getSasToken()).thenReturn("testToken"); + when(op.getResult()).thenReturn(httpOp); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + out.close(); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, BUFFER_SIZE, APPEND_MODE, false); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // 
confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + + ArgumentCaptor acFlushPath = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acFlushPosition = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushClose = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), + acFlushSASToken.capture()); + assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); + assertThat(Arrays.asList(true)).describedAs("Close flag").isEqualTo(acFlushClose.getAllValues()); + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSize() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsHttpOperation httpOp = mock(AbfsHttpOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + when(op.getSasToken()).thenReturn("testToken"); + when(op.getResult()).thenReturn(httpOp); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + Thread.sleep(1000); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, BUFFER_SIZE, APPEND_MODE, false); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) on a AppendBlob based stream is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new 
AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, true)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + Thread.sleep(1000); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, BUFFER_SIZE, APPEND_MODE, true); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, true); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) followed by a hflush call is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + when(op.getSasToken()).thenReturn(""); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + out.hflush(); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, BUFFER_SIZE, APPEND_MODE, false); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + + ArgumentCaptor acFlushPath = ArgumentCaptor.forClass(String.class); + ArgumentCaptor acFlushPosition = ArgumentCaptor.forClass(Long.class); + ArgumentCaptor acFlushRetainUnCommittedData = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushClose = ArgumentCaptor.forClass(Boolean.class); + ArgumentCaptor acFlushSASToken = ArgumentCaptor.forClass(String.class); + + verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(), + acFlushSASToken.capture()); + 
assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues()); + assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues()); + assertThat(Arrays.asList(false)).describedAs("Close flag").isEqualTo(acFlushClose.getAllValues()); + } + + /** + * The test verifies OutputStream Write of BUFFER_SIZE(4KB) followed by a flush call is making correct HTTP calls to the server + */ + @Test + public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception { + + AbfsClient client = mock(AbfsClient.class); + AbfsRestOperation op = mock(AbfsRestOperation.class); + AbfsConfiguration abfsConf; + final Configuration conf = new Configuration(); + conf.set(accountKey1, accountValue1); + abfsConf = new AbfsConfiguration(conf, accountName1); + AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf); + when(client.getAbfsPerfTracker()).thenReturn(tracker); + when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op); + when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op); + + AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0, + populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false)); + final byte[] b = new byte[BUFFER_SIZE]; + new Random().nextBytes(b); + + for (int i = 0; i < 2; i++) { + out.write(b); + } + Thread.sleep(1000); + out.flush(); + Thread.sleep(1000); + + AppendRequestParameters firstReqParameters = new AppendRequestParameters( + 0, 0, BUFFER_SIZE, APPEND_MODE, false); + AppendRequestParameters secondReqParameters = new AppendRequestParameters( + BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false); + + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(firstReqParameters), any()); + verify(client, times(1)).append( + eq(PATH), any(byte[].class), refEq(secondReqParameters), any()); + // confirm there were only 2 invocations in all + verify(client, times(2)).append( + eq(PATH), any(byte[].class), any(), any()); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java index 4f4210287ce75..191d6e77ae09b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsPerfTracker.java @@ -34,6 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; + import static org.assertj.core.api.Assertions.assertThat; /** @@ -405,4 +407,15 @@ private void verifyNoException(AbfsPerfTracker abfsPerfTracker) throws Exception tracker13.registerResult(httpOperation).registerSuccess(false).registerAggregates(Instant.MIN, TEST_AGGREGATE_COUNT); } } + + /** + * Test helper method to create an AbfsPerfTracker instance. 
+ * @param abfsConfig active test abfs config + * @return instance of AbfsPerfTracker + */ + public static AbfsPerfTracker getAPerfTrackerInstance(AbfsConfiguration abfsConfig) { + AbfsPerfTracker tracker = new AbfsPerfTracker("test", + abfsConfig.getAccountName(), abfsConfig); + return tracker; + } } \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java new file mode 100644 index 0000000000000..8e79288cf6e7d --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.IOException; + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; + +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_OAUTH_TOKEN_FETCH_RETRY_COUNT; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF; +import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_ATTEMPTS; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF_INTERVAL; +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF_INTERVAL; +import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ACCOUNT_NAME; + +public class TestAzureADAuthenticator extends AbstractAbfsIntegrationTest { + + private static final int TEST_RETRY_COUNT = 10; + private static final int TEST_MIN_BACKOFF = 20; + private static final int TEST_MAX_BACKOFF = 30; + private static final int TEST_DELTA_BACKOFF = 40; + + public TestAzureADAuthenticator() throws Exception { + super(); + } + + @Test + public void testDefaultOAuthTokenFetchRetryPolicy() throws Exception { + getConfiguration().unset(AZURE_OAUTH_TOKEN_FETCH_RETRY_COUNT); + getConfiguration().unset(AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF); + getConfiguration().unset(AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF); + getConfiguration().unset(AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF); + + String accountName = getConfiguration().get(FS_AZURE_ACCOUNT_NAME); + AbfsConfiguration abfsConfig = new AbfsConfiguration(getRawConfiguration(), + accountName); + + ExponentialRetryPolicy retryPolicy = abfsConfig + .getOauthTokenFetchRetryPolicy(); + + Assertions.assertThat(retryPolicy.getRetryCount()).describedAs( + "retryCount should be the default value {} as the same " + + "is not configured", + DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_ATTEMPTS) + .isEqualTo(DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_ATTEMPTS); + Assertions.assertThat(retryPolicy.getMinBackoff()).describedAs( + "minBackOff should be the default value {} as the same is " + + "not configured", + DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF_INTERVAL) + .isEqualTo(DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF_INTERVAL); + Assertions.assertThat(retryPolicy.getMaxBackoff()).describedAs( + "maxBackOff should be the default value {} as the same is " + + "not configured", + DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF_INTERVAL) + .isEqualTo(DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF_INTERVAL); + 
Assertions.assertThat(retryPolicy.getDeltaBackoff()).describedAs( + "deltaBackOff should be the default value {} as the same " + "is " + + "not configured", + DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF) + .isEqualTo(DEFAULT_AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF); + + } + + @Test + public void testOAuthTokenFetchRetryPolicy() + throws IOException, IllegalAccessException { + + getConfiguration() + .set(AZURE_OAUTH_TOKEN_FETCH_RETRY_COUNT, String.valueOf(TEST_RETRY_COUNT)); + getConfiguration().set(AZURE_OAUTH_TOKEN_FETCH_RETRY_MIN_BACKOFF, + String.valueOf(TEST_MIN_BACKOFF)); + getConfiguration().set(AZURE_OAUTH_TOKEN_FETCH_RETRY_MAX_BACKOFF, + String.valueOf(TEST_MAX_BACKOFF)); + getConfiguration().set(AZURE_OAUTH_TOKEN_FETCH_RETRY_DELTA_BACKOFF, + String.valueOf(TEST_DELTA_BACKOFF)); + + String accountName = getConfiguration().get(FS_AZURE_ACCOUNT_NAME); + AbfsConfiguration abfsConfig = new AbfsConfiguration(getRawConfiguration(), + accountName); + + ExponentialRetryPolicy retryPolicy = abfsConfig + .getOauthTokenFetchRetryPolicy(); + + Assertions.assertThat(retryPolicy.getRetryCount()) + .describedAs("retryCount should be {}", TEST_RETRY_COUNT) + .isEqualTo(TEST_RETRY_COUNT); + Assertions.assertThat(retryPolicy.getMinBackoff()) + .describedAs("minBackOff should be {}", TEST_MIN_BACKOFF) + .isEqualTo(TEST_MIN_BACKOFF); + Assertions.assertThat(retryPolicy.getMaxBackoff()) + .describedAs("maxBackOff should be {}", TEST_MAX_BACKOFF) + .isEqualTo(TEST_MAX_BACKOFF); + Assertions.assertThat(retryPolicy.getDeltaBackoff()) + .describedAs("deltaBackOff should be {}", TEST_DELTA_BACKOFF) + .isEqualTo(TEST_DELTA_BACKOFF); + } + +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java new file mode 100644 index 0000000000000..e10419f148b25 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestExponentialRetryPolicy.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.util.Random; + +import org.junit.Assert; +import org.junit.Test; + +import org.apache.hadoop.conf.Configuration; + +import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; +import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; + +/** + * Unit test TestExponentialRetryPolicy. 
+ */ +public class TestExponentialRetryPolicy extends AbstractAbfsIntegrationTest { + + private final int maxRetryCount = 30; + private final int noRetryCount = 0; + private final int retryCount = new Random().nextInt(maxRetryCount); + private final int retryCountBeyondMax = maxRetryCount + 1; + + + public TestExponentialRetryPolicy() throws Exception { + super(); + } + + @Test + public void testDifferentMaxIORetryCount() throws Exception { + AbfsConfiguration abfsConfig = getAbfsConfig(); + abfsConfig.setMaxIoRetries(noRetryCount); + testMaxIOConfig(abfsConfig); + abfsConfig.setMaxIoRetries(retryCount); + testMaxIOConfig(abfsConfig); + abfsConfig.setMaxIoRetries(retryCountBeyondMax); + testMaxIOConfig(abfsConfig); + } + + @Test + public void testDefaultMaxIORetryCount() throws Exception { + AbfsConfiguration abfsConfig = getAbfsConfig(); + Assert.assertTrue( + String.format("default maxIORetry count is %s.", maxRetryCount), + abfsConfig.getMaxIoRetries() == maxRetryCount); + testMaxIOConfig(abfsConfig); + } + + private AbfsConfiguration getAbfsConfig() throws Exception { + Configuration + config = new Configuration(this.getRawConfiguration()); + return new AbfsConfiguration(config, "dummyAccountName"); + } + + private void testMaxIOConfig(AbfsConfiguration abfsConfig) { + ExponentialRetryPolicy retryPolicy = new ExponentialRetryPolicy( + abfsConfig.getMaxIoRetries()); + int localRetryCount = 0; + + while (localRetryCount < abfsConfig.getMaxIoRetries()) { + Assert.assertTrue( + "Retry should be allowed when retryCount less than max count configured.", + retryPolicy.shouldRetry(localRetryCount, -1)); + localRetryCount++; + } + + Assert.assertTrue( + "When all retries are exhausted, the retryCount will be same as max configured", + localRetryCount == abfsConfig.getMaxIoRetries()); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java new file mode 100644 index 0000000000000..f9950faf944df --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azurebfs.services; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.charset.Charset; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.azurebfs.utils.TextFileBasedIdentityHandler; + +import static org.apache.hadoop.test.LambdaTestUtils.intercept; + +public class TestTextFileBasedIdentityHandler { + + @ClassRule + public static TemporaryFolder tempDir = new TemporaryFolder(); + private static File userMappingFile = null; + private static File groupMappingFile = null; + private static final String NEW_LINE = "\n"; + private static String testUserDataLine1 = + "a2b27aec-77bd-46dd-8c8c-39611a333331:user1:11000:21000:spi-user1:abcf86e9-5a5b-49e2-a253-f5c9e2afd4ec" + + NEW_LINE; + private static String testUserDataLine2 = + "#i2j27aec-77bd-46dd-8c8c-39611a333331:user2:41000:21000:spi-user2:mnof86e9-5a5b-49e2-a253-f5c9e2afd4ec" + + NEW_LINE; + private static String testUserDataLine3 = + "c2d27aec-77bd-46dd-8c8c-39611a333331:user2:21000:21000:spi-user2:deff86e9-5a5b-49e2-a253-f5c9e2afd4ec" + + NEW_LINE; + private static String testUserDataLine4 = "e2f27aec-77bd-46dd-8c8c-39611a333331c" + NEW_LINE; + private static String testUserDataLine5 = + "g2h27aec-77bd-46dd-8c8c-39611a333331:user4:41000:21000:spi-user4:jklf86e9-5a5b-49e2-a253-f5c9e2afd4ec" + + NEW_LINE; + private static String testUserDataLine6 = " " + NEW_LINE; + private static String testUserDataLine7 = + "i2j27aec-77bd-46dd-8c8c-39611a333331:user5:41000:21000:spi-user5:mknf86e9-5a5b-49e2-a253-f5c9e2afd4ec" + + NEW_LINE; + + private static String testGroupDataLine1 = "1d23024d-957c-4456-aac1-a57f9e2de914:group1:21000:sgp-group1" + NEW_LINE; + private static String testGroupDataLine2 = "3d43024d-957c-4456-aac1-a57f9e2de914:group2:21000:sgp-group2" + NEW_LINE; + private static String testGroupDataLine3 = "5d63024d-957c-4456-aac1-a57f9e2de914" + NEW_LINE; + private static String testGroupDataLine4 = " " + NEW_LINE; + private static String testGroupDataLine5 = "7d83024d-957c-4456-aac1-a57f9e2de914:group4:21000:sgp-group4" + NEW_LINE; + + @BeforeClass + public static void init() throws IOException { + userMappingFile = tempDir.newFile("user-mapping.conf"); + groupMappingFile = tempDir.newFile("group-mapping.conf"); + + //Stage data for user mapping + FileUtils.writeStringToFile(userMappingFile, testUserDataLine1, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine2, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine3, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine4, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine5, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine6, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine7, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, NEW_LINE, Charset.forName("UTF-8"), true); + + //Stage data for group mapping + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine1, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine2, Charset.forName("UTF-8"), true); + 
FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine3, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine4, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine5, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(groupMappingFile, NEW_LINE, Charset.forName("UTF-8"), true); + } + + private void assertUserLookup(TextFileBasedIdentityHandler handler, String userInTest, String expectedUser) + throws IOException { + String actualUser = handler.lookupForLocalUserIdentity(userInTest); + Assert.assertEquals("Wrong user identity for ", expectedUser, actualUser); + } + + @Test + public void testLookupForUser() throws IOException { + TextFileBasedIdentityHandler handler = + new TextFileBasedIdentityHandler(userMappingFile.getPath(), groupMappingFile.getPath()); + + //Success scenario => user in test -> user2. + assertUserLookup(handler, testUserDataLine3.split(":")[0], testUserDataLine3.split(":")[1]); + + //No username found in the mapping file. + assertUserLookup(handler, "bogusIdentity", ""); + + //Edge case when username is empty string. + assertUserLookup(handler, "", ""); + } + + @Test + public void testLookupForUserFileNotFound() throws Exception { + TextFileBasedIdentityHandler handler = + new TextFileBasedIdentityHandler(userMappingFile.getPath() + ".test", groupMappingFile.getPath()); + intercept(FileNotFoundException.class, "FileNotFoundException", + () -> handler.lookupForLocalUserIdentity(testUserDataLine3.split(":")[0])); + } + + private void assertGroupLookup(TextFileBasedIdentityHandler handler, String groupInTest, String expectedGroup) + throws IOException { + String actualGroup = handler.lookupForLocalGroupIdentity(groupInTest); + Assert.assertEquals("Wrong group identity for ", expectedGroup, actualGroup); + } + + @Test + public void testLookupForGroup() throws IOException { + TextFileBasedIdentityHandler handler = + new TextFileBasedIdentityHandler(userMappingFile.getPath(), groupMappingFile.getPath()); + + //Success scenario. + assertGroupLookup(handler, testGroupDataLine2.split(":")[0], testGroupDataLine2.split(":")[1]); + + //No group name found in the mapping file. + assertGroupLookup(handler, "bogusIdentity", ""); + + //Edge case when group name is empty string. + assertGroupLookup(handler, "", ""); + } + + @Test + public void testLookupForGroupFileNotFound() throws Exception { + TextFileBasedIdentityHandler handler = + new TextFileBasedIdentityHandler(userMappingFile.getPath(), groupMappingFile.getPath() + ".test"); + intercept(FileNotFoundException.class, "FileNotFoundException", + () -> handler.lookupForLocalGroupIdentity(testGroupDataLine2.split(":")[0])); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java new file mode 100644 index 0000000000000..6f2209a6e8ced --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DelegationSASGenerator.java @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.time.Instant; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider; +import org.apache.hadoop.fs.azurebfs.services.AbfsUriQueryBuilder; + + +/** + * Test Delegation SAS generator. + */ +public class DelegationSASGenerator extends SASGenerator { + private final String skoid; + private final String sktid; + private final String skt; + private final String ske; + private final String sks = "b"; + private final String skv; + + public DelegationSASGenerator(byte[] userDelegationKey, String skoid, String sktid, String skt, String ske, String skv) { + super(userDelegationKey); + this.skoid = skoid; + this.sktid = sktid; + this.skt = skt; + this.ske = ske; + this.skv = skv; + } + + public String getDelegationSAS(String accountName, String containerName, String path, String operation, + String saoid, String suoid, String scid) { + + final String sv = AuthenticationVersion.Feb20.toString(); + final String st = ISO_8601_FORMATTER.format(Instant.now().minus(FIVE_MINUTES)); + final String se = ISO_8601_FORMATTER.format(Instant.now().plus(ONE_DAY)); + String sr = "b"; + String sdd = null; + String sp; + + switch (operation) { + case SASTokenProvider.CREATE_FILE_OPERATION: + case SASTokenProvider.CREATE_DIRECTORY_OPERATION: + case SASTokenProvider.WRITE_OPERATION: + case SASTokenProvider.SET_PROPERTIES_OPERATION: + sp = "w"; + break; + case SASTokenProvider.DELETE_OPERATION: + sp = "d"; + break; + case SASTokenProvider.DELETE_RECURSIVE_OPERATION: + sp = "d"; + sr = "d"; + sdd = Integer.toString(StringUtils.countMatches(path, "/")); + break; + case SASTokenProvider.CHECK_ACCESS_OPERATION: + case SASTokenProvider.GET_ACL_OPERATION: + case SASTokenProvider.GET_STATUS_OPERATION: + sp = "e"; + break; + case SASTokenProvider.LIST_OPERATION: + sp = "l"; + break; + case SASTokenProvider.GET_PROPERTIES_OPERATION: + case SASTokenProvider.READ_OPERATION: + sp = "r"; + break; + case SASTokenProvider.RENAME_DESTINATION_OPERATION: + case SASTokenProvider.RENAME_SOURCE_OPERATION: + sp = "m"; + break; + case SASTokenProvider.SET_ACL_OPERATION: + case SASTokenProvider.SET_PERMISSION_OPERATION: + sp = "p"; + break; + case SASTokenProvider.SET_OWNER_OPERATION: + sp = "o"; + break; + default: + throw new IllegalArgumentException(operation); + } + + String signature = computeSignatureForSAS(sp, st, se, sv, sr, accountName, containerName, + path, saoid, suoid, scid); + + AbfsUriQueryBuilder qb = new AbfsUriQueryBuilder(); + qb.addQuery("skoid", skoid); + qb.addQuery("sktid", sktid); + qb.addQuery("skt", skt); + qb.addQuery("ske", ske); + qb.addQuery("sks", sks); + qb.addQuery("skv", skv); + if (saoid != null) { + qb.addQuery("saoid", saoid); + } + if (suoid != null) { + qb.addQuery("suoid", suoid); + } + if (scid != null) { + qb.addQuery("scid", scid); + } + qb.addQuery("sp", sp); + qb.addQuery("st", st); + 
qb.addQuery("se", se); + qb.addQuery("sv", sv); + qb.addQuery("sr", sr); + if (sdd != null) { + qb.addQuery("sdd", sdd); + } + qb.addQuery("sig", signature); + return qb.toString().substring(1); + } + + private String computeSignatureForSAS(String sp, String st, String se, String sv, + String sr, String accountName, String containerName, + String path, String saoid, String suoid, String scid) { + + StringBuilder sb = new StringBuilder(); + sb.append(sp); + sb.append("\n"); + sb.append(st); + sb.append("\n"); + sb.append(se); + sb.append("\n"); + // canonicalized resource + sb.append("/blob/"); + sb.append(accountName); + sb.append("/"); + sb.append(containerName); + if (path != null && !sr.equals("c")) { + sb.append(path); + } + sb.append("\n"); + sb.append(skoid); + sb.append("\n"); + sb.append(sktid); + sb.append("\n"); + sb.append(skt); + sb.append("\n"); + sb.append(ske); + sb.append("\n"); + sb.append(sks); + sb.append("\n"); + sb.append(skv); + sb.append("\n"); + if (saoid != null) { + sb.append(saoid); + } + sb.append("\n"); + if (suoid != null) { + sb.append(suoid); + } + sb.append("\n"); + if (scid != null) { + sb.append(scid); + } + sb.append("\n"); + + sb.append("\n"); // sip + sb.append("\n"); // spr + sb.append(sv); + sb.append("\n"); + sb.append(sr); + sb.append("\n"); + sb.append("\n"); // - For optional : rscc - ResponseCacheControl + sb.append("\n"); // - For optional : rscd - ResponseContentDisposition + sb.append("\n"); // - For optional : rsce - ResponseContentEncoding + sb.append("\n"); // - For optional : rscl - ResponseContentLanguage + sb.append("\n"); // - For optional : rsct - ResponseContentType + + String stringToSign = sb.toString(); + LOG.debug("Delegation SAS stringToSign: " + stringToSign.replace("\n", ".")); + return computeHmac256(stringToSign); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java index 19bf9e2c45327..2e9289d8d44c7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/SASGenerator.java @@ -19,104 +19,76 @@ package org.apache.hadoop.fs.azurebfs.utils; import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.time.Duration; import java.time.format.DateTimeFormatter; -import java.time.Instant; import java.time.ZoneId; import java.util.Locale; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; -import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants; -import org.apache.hadoop.fs.azurebfs.services.AbfsUriQueryBuilder; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Test container SAS generator. + * Test SAS generator. 
*/ -public class SASGenerator { +public abstract class SASGenerator { + + public enum AuthenticationVersion { + Nov18("2018-11-09"), + Dec19("2019-12-12"), + Feb20("2020-02-10"); + + private final String ver; + + AuthenticationVersion(String version) { + this.ver = version; + } - private static final String HMAC_SHA256 = "HmacSHA256"; - private static final int TOKEN_START_PERIOD_IN_SECONDS = 5 * 60; - private static final int TOKEN_EXPIRY_PERIOD_IN_SECONDS = 24 * 60 * 60; - public static final DateTimeFormatter ISO_8601_UTC_DATE_FORMATTER = + @Override + public String toString() { + return ver; + } + } + + protected static final Logger LOG = LoggerFactory.getLogger(SASGenerator.class); + public static final Duration FIVE_MINUTES = Duration.ofMinutes(5); + public static final Duration ONE_DAY = Duration.ofDays(1); + public static final DateTimeFormatter ISO_8601_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.ROOT) .withZone(ZoneId.of("UTC")); + private Mac hmacSha256; private byte[] key; - public SASGenerator(byte[] key) { - this.key = key; - initializeMac(); - } - - public String getContainerSASWithFullControl(String accountName, String containerName) { - String sp = "rcwdl"; - String sv = "2018-11-09"; - String sr = "c"; - String st = ISO_8601_UTC_DATE_FORMATTER.format(Instant.now().minusSeconds(TOKEN_START_PERIOD_IN_SECONDS)); - String se = - ISO_8601_UTC_DATE_FORMATTER.format(Instant.now().plusSeconds(TOKEN_EXPIRY_PERIOD_IN_SECONDS)); - - String signature = computeSignatureForSAS(sp, st, se, sv, "c", - accountName, containerName); - - AbfsUriQueryBuilder qb = new AbfsUriQueryBuilder(); - qb.addQuery("sp", sp); - qb.addQuery("st", st); - qb.addQuery("se", se); - qb.addQuery("sv", sv); - qb.addQuery("sr", sr); - qb.addQuery("sig", signature); - return qb.toString().substring(1); + // hide default constructor + private SASGenerator() { } - private String computeSignatureForSAS(String sp, String st, - String se, String sv, String sr, String accountName, String containerName) { - - StringBuilder sb = new StringBuilder(); - sb.append(sp); - sb.append("\n"); - sb.append(st); - sb.append("\n"); - sb.append(se); - sb.append("\n"); - // canonicalized resource - sb.append("/blob/"); - sb.append(accountName); - sb.append("/"); - sb.append(containerName); - sb.append("\n"); - sb.append("\n"); // si - sb.append("\n"); // sip - sb.append("\n"); // spr - sb.append(sv); - sb.append("\n"); - sb.append(sr); - sb.append("\n"); - sb.append("\n"); // - For optional : rscc - ResponseCacheControl - sb.append("\n"); // - For optional : rscd - ResponseContentDisposition - sb.append("\n"); // - For optional : rsce - ResponseContentEncoding - sb.append("\n"); // - For optional : rscl - ResponseContentLanguage - sb.append("\n"); // - For optional : rsct - ResponseContentType - - String stringToSign = sb.toString(); - return computeHmac256(stringToSign); + /** + * Called by subclasses to initialize the cryptographic SHA-256 HMAC provider. + * @param key - a 256-bit secret key + */ + protected SASGenerator(byte[] key) { + this.key = key; + initializeMac(); } private void initializeMac() { // Initializes the HMAC-SHA256 Mac and SecretKey. 
try { - hmacSha256 = Mac.getInstance(HMAC_SHA256); - hmacSha256.init(new SecretKeySpec(key, HMAC_SHA256)); + hmacSha256 = Mac.getInstance("HmacSHA256"); + hmacSha256.init(new SecretKeySpec(key, "HmacSHA256")); } catch (final Exception e) { throw new IllegalArgumentException(e); } } - private String computeHmac256(final String stringToSign) { + protected String computeHmac256(final String stringToSign) { byte[] utf8Bytes; try { - utf8Bytes = stringToSign.getBytes(AbfsHttpConstants.UTF_8); + utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8.toString()); } catch (final UnsupportedEncodingException e) { throw new IllegalArgumentException(e); } @@ -126,4 +98,4 @@ private String computeHmac256(final String stringToSign) { } return Base64.encode(hmac); } -} +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java new file mode 100644 index 0000000000000..24a1cea255b4a --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/ServiceSASGenerator.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.time.Instant; + +import org.apache.hadoop.fs.azurebfs.services.AbfsUriQueryBuilder; + +/** + * Test Service SAS generator. + */ +public class ServiceSASGenerator extends SASGenerator { + + /** + * Creates a SAS Generator for Service SAS + * (https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas). 
+ * @param accountKey - the storage account key + */ + public ServiceSASGenerator(byte[] accountKey) { + super(accountKey); + } + + public String getContainerSASWithFullControl(String accountName, String containerName) { + String sp = "rcwdl"; + String sv = AuthenticationVersion.Feb20.toString(); + String sr = "c"; + String st = ISO_8601_FORMATTER.format(Instant.now().minus(FIVE_MINUTES)); + String se = ISO_8601_FORMATTER.format(Instant.now().plus(ONE_DAY)); + + String signature = computeSignatureForSAS(sp, st, se, sv, "c", + accountName, containerName, null); + + AbfsUriQueryBuilder qb = new AbfsUriQueryBuilder(); + qb.addQuery("sp", sp); + qb.addQuery("st", st); + qb.addQuery("se", se); + qb.addQuery("sv", sv); + qb.addQuery("sr", sr); + qb.addQuery("sig", signature); + return qb.toString().substring(1); + } + + private String computeSignatureForSAS(String sp, String st, String se, String sv, + String sr, String accountName, String containerName, String path) { + + StringBuilder sb = new StringBuilder(); + sb.append(sp); + sb.append("\n"); + sb.append(st); + sb.append("\n"); + sb.append(se); + sb.append("\n"); + // canonicalized resource + sb.append("/blob/"); + sb.append(accountName); + sb.append("/"); + sb.append(containerName); + if (path != null && !sr.equals("c")) { + //sb.append("/"); + sb.append(path); + } + sb.append("\n"); + sb.append("\n"); // si + sb.append("\n"); // sip + sb.append("\n"); // spr + sb.append(sv); + sb.append("\n"); + sb.append(sr); + sb.append("\n"); + sb.append("\n"); // - For optional : rscc - ResponseCacheControl + sb.append("\n"); // - For optional : rscd - ResponseContentDisposition + sb.append("\n"); // - For optional : rsce - ResponseContentEncoding + sb.append("\n"); // - For optional : rscl - ResponseContentLanguage + sb.append("\n"); // - For optional : rsct - ResponseContentType + + String stringToSign = sb.toString(); + LOG.debug("Service SAS stringToSign: " + stringToSign.replace("\n", ".")); + return computeHmac256(stringToSign); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestCachedSASToken.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestCachedSASToken.java new file mode 100644 index 0000000000000..cbba80877206f --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestCachedSASToken.java @@ -0,0 +1,196 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.io.IOException; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.UUID; + +import org.junit.Assert; +import org.junit.Test; + +import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS; +import static java.time.temporal.ChronoUnit.SECONDS; +import static java.time.temporal.ChronoUnit.DAYS; + +/** + * Test CachedSASToken. + */ +public final class TestCachedSASToken { + + @Test + public void testUpdateAndGet() throws IOException { + CachedSASToken cachedSasToken = new CachedSASToken(); + + String se1 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS * 2, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + String token1 = "se=" + se1; + + // set first time and ensure reference equality + cachedSasToken.update(token1); + String cachedToken = cachedSasToken.get(); + Assert.assertTrue(token1 == cachedToken); + + // update with same token and ensure reference equality + cachedSasToken.update(token1); + cachedToken = cachedSasToken.get(); + Assert.assertTrue(token1 == cachedToken); + + // renew and ensure reference equality + String se2 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS * 2, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + String token2 = "se=" + se2; + cachedSasToken.update(token2); + cachedToken = cachedSasToken.get(); + Assert.assertTrue(token2 == cachedToken); + + // renew and ensure reference equality with ske + String se3 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS * 4, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + + String ske3 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS * 2, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + String token3 = "se=" + se3 + "&ske=" + ske3; + cachedSasToken.update(token3); + cachedToken = cachedSasToken.get(); + Assert.assertTrue(token3 == cachedToken); + } + + @Test + public void testGetExpiration() throws IOException { + CachedSASToken cachedSasToken = new CachedSASToken(); + + String se = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS - 1, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + OffsetDateTime seDate = OffsetDateTime.parse(se, DateTimeFormatter.ISO_DATE_TIME); + String token = "se=" + se; + + // By-pass the normal validation provided by update method + // by callng set with expired SAS, then ensure the get + // method returns null (auto expiration as next REST operation will use + // SASTokenProvider to get a new SAS). 
+ cachedSasToken.setForTesting(token, seDate); + String cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + } + + @Test + public void testUpdateAndGetWithExpiredToken() throws IOException { + CachedSASToken cachedSasToken = new CachedSASToken(); + + String se1 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS - 1, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + String token1 = "se=" + se1; + + // set expired token and ensure not cached + cachedSasToken.update(token1); + String cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + + String se2 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS * 2, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + + String ske2 = OffsetDateTime.now(ZoneOffset.UTC).plus( + DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS - 1, + SECONDS).format(DateTimeFormatter.ISO_DATE_TIME); + String token2 = "se=" + se2 + "&ske=" + ske2; + + // set with expired ske and ensure not cached + cachedSasToken.update(token2); + cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + + } + + @Test + public void testUpdateAndGetWithInvalidToken() throws IOException { + CachedSASToken cachedSasToken = new CachedSASToken(); + + // set and ensure reference that it is not cached + String token1 = "se="; + cachedSasToken.update(token1); + String cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + + // set and ensure reference that it is not cached + String token2 = "se=xyz"; + cachedSasToken.update(token2); + cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + + // set and ensure reference that it is not cached + String token3 = "se=2100-01-01T00:00:00Z&ske="; + cachedSasToken.update(token3); + cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + + // set and ensure reference that it is not cached + String token4 = "se=2100-01-01T00:00:00Z&ske=xyz&"; + cachedSasToken.update(token4); + cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + + // set and ensure reference that it is not cached + String token5 = "se=abc&ske=xyz&"; + cachedSasToken.update(token5); + cachedToken = cachedSasToken.get(); + Assert.assertNull(cachedToken); + } + + public static CachedSASToken getTestCachedSASTokenInstance() { + String expiryPostADay = OffsetDateTime.now(ZoneOffset.UTC) + .plus(1, DAYS) + .format(DateTimeFormatter.ISO_DATE_TIME); + String version = "2020-20-20"; + + StringBuilder sb = new StringBuilder(); + sb.append("skoid="); + sb.append(UUID.randomUUID().toString()); + sb.append("&sktid="); + sb.append(UUID.randomUUID().toString()); + sb.append("&skt="); + sb.append(OffsetDateTime.now(ZoneOffset.UTC) + .minus(1, DAYS) + .format(DateTimeFormatter.ISO_DATE_TIME)); + sb.append("&ske="); + sb.append(expiryPostADay); + sb.append("&sks=b"); + sb.append("&skv="); + sb.append(version); + sb.append("&sp=rw"); + sb.append("&sr=b"); + sb.append("&se="); + sb.append(expiryPostADay); + sb.append("&sv=2"); + sb.append(version); + + CachedSASToken cachedSASToken = new CachedSASToken(); + cachedSASToken.update(sb.toString()); + return cachedSASToken; + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestMockHelpers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestMockHelpers.java new file mode 100644 index 0000000000000..e25a099a00ef3 --- /dev/null +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestMockHelpers.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azurebfs.utils; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; + +/** + * Test Mock Helpers. + */ +public final class TestMockHelpers { + + /** + * Sets a class field by reflection. + * @param type + * @param obj + * @param fieldName + * @param fieldObject + * @param + * @return + * @throws Exception + */ + public static T setClassField( + Class type, + final T obj, + final String fieldName, + Object fieldObject) throws Exception { + + Field field = type.getDeclaredField(fieldName); + field.setAccessible(true); + Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, + field.getModifiers() & ~Modifier.FINAL); + field.set(obj, fieldObject); + + return obj; + } + + private TestMockHelpers() { + // Not called. - For checkstyle: HideUtilityClassConstructor + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/resources/abfs.xml b/hadoop-tools/hadoop-azure/src/test/resources/abfs.xml index 1561da2234c6f..f06e5cac9b8b2 100644 --- a/hadoop-tools/hadoop-azure/src/test/resources/abfs.xml +++ b/hadoop-tools/hadoop-azure/src/test/resources/abfs.xml @@ -66,4 +66,20 @@ fs.contract.supports-unbuffer true + + + fs.contract.supports-hflush + true + + + + fs.contract.supports-hsync + true + + + + fs.contract.metadata_updated_on_hsync + true + + diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template b/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template new file mode 100644 index 0000000000000..2e6e2750c3b78 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml.template @@ -0,0 +1,174 @@ + + + + + + + + + + + fs.azure.account.auth.type + SharedKey + + + + + + fs.azure.account.key.{ABFS_ACCOUNT_NAME}.dfs.core.windows.net + {ACCOUNT_ACCESS_KEY} + Account access key + + + + fs.azure.account.oauth.provider.type.{ABFS_ACCOUNT_NAME}.dfs.core.windows.net + + org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider + OAuth token provider implementation class + + + + fs.azure.account.oauth2.client.endpoint.{ABFS_ACCOUNT_NAME}.dfs.core.windows.net + + https://login.microsoftonline.com/{TENANTID}/oauth2/token + Token end point, this can be found through Azure portal + + + + + fs.azure.account.oauth2.client.id.{ABFS_ACCOUNT_NAME}.dfs.core.windows.net + + {client id} + AAD client id. 
+ + + + fs.azure.account.oauth2.client.secret.{ABFS_ACCOUNT_NAME}.dfs.core.windows.net + + {client secret} + AAD client secret + + + + + fs.contract.test.fs.abfs + abfs://{CONTAINER_NAME}@{ACCOUNT_NAME}.dfs.core.windows.net + + + fs.contract.test.fs.abfss + abfss://{CONTAINER_NAME}@{ACCOUNT_NAME}.dfs.core.windows.net + + + + + fs.azure.wasb.account.name + {WASB_ACCOUNT_NAME}.blob.core.windows.net + + + fs.azure.account.key.{WASB_ACCOUNT_NAME}.blob.core.windows.net + WASB account key + + + fs.contract.test.fs.wasb + wasb://{WASB_FILESYSTEM}@{WASB_ACCOUNT_NAME}.blob.core.windows.net + + + + + + fs.azure.account.oauth2.contributor.client.id + {Client id of SP with RBAC Storage Blob Data Contributor} + + + fs.azure.account.oauth2.contributor.client.secret + {Client secret of SP with RBAC Storage Blob Data Contributor} + + + fs.azure.account.oauth2.reader.client.id + {Client id of SP with RBAC Storage Blob Data Reader} + + + fs.azure.account.oauth2.reader.client.secret + {Client secret of SP with RBAC Storage Blob Data Reader} + + + + + + + fs.azure.account.test.oauth2.client.id + {client id} + The client id(app id) for the app created on step 1 + + + + fs.azure.account.test.oauth2.client.secret + {client secret} + +The client secret(application's secret) for the app created on step 1 + + + + fs.azure.check.access.testuser.guid + {guid} + The guid fetched on step 2 + + + fs.azure.account.oauth2.client.endpoint.{account name}.dfs.core +.windows.net + https://login.microsoftonline.com/{TENANTID}/oauth2/token + +Token end point. This can be found through Azure portal. As part of CheckAccess +test cases. The access will be tested for an FS instance created with the +above mentioned client credentials. So this configuration is necessary to +create the test FS instance. + + + + + + fs.azure.test.appendblob.enabled + false + If made true, tests will be running under the assumption that + append blob is enabled and the root directory and contract test root + directory will be part of the append blob directories. 
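The azure-auth-keys.xml.template above enumerates the per-account OAuth settings an ABFS test run needs: the token provider type, the token endpoint, and the client id/secret, plus a contract-test filesystem URI. Below is a minimal sketch of wiring those same keys programmatically onto a Hadoop Configuration; the helper class name is invented, and the account, tenant, and credential arguments are placeholders rather than real values.

```java
import org.apache.hadoop.conf.Configuration;

// Hypothetical helper mirroring azure-auth-keys.xml.template; the account,
// tenant and credential arguments are placeholders, not real values.
public final class AbfsOAuthTestConfig {

  public static Configuration forAccount(String account, String tenantId,
      String clientId, String clientSecret) {
    Configuration conf = new Configuration();
    String suffix = account + ".dfs.core.windows.net";
    // Same property names as the template, scoped to one storage account.
    conf.set("fs.azure.account.oauth.provider.type." + suffix,
        "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider");
    conf.set("fs.azure.account.oauth2.client.endpoint." + suffix,
        "https://login.microsoftonline.com/" + tenantId + "/oauth2/token");
    conf.set("fs.azure.account.oauth2.client.id." + suffix, clientId);
    conf.set("fs.azure.account.oauth2.client.secret." + suffix, clientSecret);
    // Contract tests also need a target filesystem; the container is a placeholder.
    conf.set("fs.contract.test.fs.abfs", "abfs://testcontainer@" + suffix);
    return conf;
  }

  private AbfsOAuthTestConfig() {
  }
}
```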
+ + + + diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml index d43d67e7b5fa3..24ffeb5d107a0 100644 --- a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml +++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml @@ -38,6 +38,11 @@ false + + fs.contract.rename-returns-false-if-dest-exists + true + + @@ -61,4 +66,8 @@ + + + + diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml index 2353cfbff6626..f256f6d7b0841 100644 --- a/hadoop-tools/hadoop-datajoin/pom.xml +++ b/hadoop-tools/hadoop-datajoin/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-datajoin - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Data Join Apache Hadoop Data Join jar @@ -108,10 +108,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 711c3fcff7eef..ab4cac483f854 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-distcp - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Distributed Copy Apache Hadoop Distributed Copy jar diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java index 6f8aa34b29584..164d014876dbd 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java @@ -35,7 +35,7 @@ import java.net.URI; import java.util.Set; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * The CopyListing abstraction is responsible for how the list of diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java index 9f6f136c6e6f2..0d722abcdaba6 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java @@ -40,9 +40,9 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; -import com.google.common.base.Objects; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.Objects; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * CopyListingFileStatus is a view of {@link FileStatus}, recording additional diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java index c36335afc161c..21ef47e2b05df 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Random; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -44,7 +44,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * DistCp is the main driver-class for DistCpV2. @@ -214,6 +214,8 @@ public Job createAndSubmitJob() throws Exception { String jobID = job.getJobID().toString(); job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); + // Set the jobId for the applications running through run method. + getConf().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); LOG.info("DistCp job-id: " + jobID); return job; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index 4a6552fed6b55..78e1e2eac1845 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -18,8 +18,8 @@ package org.apache.hadoop.tools; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java index 3b9d13b3b0308..1fbea9a0ea287 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java @@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The OptionsParser parses out the command-line options passed to DistCp, @@ -197,9 +197,6 @@ public static DistCpOptions parse(String[] args) if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) { builder.preserve( getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())); - } else { - // No "preserve" settings specified. Preserve block-size. 
- builder.preserve(DistCpOptions.FileAttribute.BLOCKSIZE); } if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) { diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java index 2f4ea69e8fd5c..080bcef7956e9 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java @@ -35,7 +35,7 @@ import java.util.List; import java.util.regex.Pattern; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A CopyFilter which compares Java Regex Patterns to each Path to determine diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java index 7e5a26a36abe7..ddcbb14e8314f 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,7 +39,7 @@ import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.security.Credentials; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java index 139bd08fd7abc..e346d0b938c93 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java @@ -318,8 +318,10 @@ private void preserveFileAttributesForDirectories(Configuration conf) SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(sourceListing)); long totalLen = clusterFS.getFileStatus(sourceListing).getLen(); - - Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH)); + // For Atomic Copy the Final & Work Path are different & atomic copy has + // already moved it to final path. 
+ Path targetRoot = + new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH)); long preservedEntries = 0; try { diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/DeletedDirTracker.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/DeletedDirTracker.java index 64431f7e5459c..7e11add9b7a79 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/DeletedDirTracker.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/DeletedDirTracker.java @@ -18,9 +18,9 @@ package org.apache.hadoop.tools.mapred; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.fs.Path; import org.apache.hadoop.tools.CopyListingFileStatus; diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java index 4ed856fcb7deb..408919549c67a 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java @@ -45,7 +45,7 @@ import org.apache.hadoop.tools.util.RetriableCommand; import org.apache.hadoop.tools.util.ThrottledInputStream; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class extends RetriableCommand to implement the copy of files, diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java index 73c49bb8f1a61..1af434e19f823 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.tools.util; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -199,6 +199,9 @@ public static void preserve(FileSystem targetFS, Path path, EnumSet attributes, boolean preserveRawXattrs) throws IOException { + // strip out those attributes we don't need any more + attributes.remove(FileAttribute.BLOCKSIZE); + attributes.remove(FileAttribute.CHECKSUMTYPE); // If not preserving anything from FileStatus, don't bother fetching it. FileStatus targetFileStatus = attributes.isEmpty() ? 
null : targetFS.getFileStatus(path); diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm index bf5b89135fccb..c1da0e36383cd 100644 --- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm +++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm @@ -337,7 +337,7 @@ Command Line Options | Flag | Description | Notes | | ----------------- | ------------------------------------ | -------- | -| `-p[rbugpcaxt]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL x: XAttr t: timestamp | When `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions. The option -pr is only valid if both source and target directory are not erasure coded. **Note:** If -p option's are not specified, then by default block size is preserved. | +| `-p[rbugpcaxt]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL x: XAttr t: timestamp | When `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions. The option -pr is only valid if both source and target directory are not erasure coded. | | `-i` | Ignore failures | As explained in the Appendix, this option will keep more accurate statistics about the copy than the default case. It also preserves logs from failed copies, which can be valuable for debugging. Finally, a failing map will not cause the job to fail before all splits are attempted. | | `-log ` | Write logs to \ | DistCp keeps logs of each file it attempts to copy as map output. If a map fails, the log output will not be retained if it is re-executed. | | `-v` | Log additional info (path, size) in the SKIP/COPY log | This option can only be used with -log option. | diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java index 8adc2cfb867fc..bdf416a0199b9 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java @@ -32,7 +32,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Tests distcp in combination with HDFS raw.* XAttrs. diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java index 96193e8b1b759..77d428b7d1031 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java @@ -40,7 +40,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Tests distcp in combination with HDFS XAttrs. 
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java index b48355af25ba5..85b312a94b52f 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java @@ -415,7 +415,7 @@ public void testPreserve() { "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); - Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); + Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index eeaf30a929996..1a40d78b2651a 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.tools.contract; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; +import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_DISTCP_JOB_ID; import java.io.IOException; import java.util.Collections; @@ -42,6 +43,7 @@ import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.mapred.CopyMapper; +import org.apache.hadoop.tools.util.DistCpTestUtils; import org.junit.Before; import org.junit.Rule; @@ -464,6 +466,17 @@ public void testLargeFilesFromRemote() throws Exception { largeFiles(remoteFS, remoteDir, localFS, localDir); } + @Test + public void testSetJobId() throws Exception { + describe("check jobId is set in the conf"); + remoteFS.create(new Path(remoteDir, "file1")).close(); + DistCpTestUtils + .assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), + localDir.toString(), null, conf); + assertNotNull("DistCp job id isn't set", + conf.get(CONF_LABEL_DISTCP_JOB_ID)); + } + /** * Executes a DistCp using a file system sub-tree with multiple nesting * levels. 
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java index 11118c1f72400..685f030e15ea0 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java @@ -53,6 +53,8 @@ import java.util.*; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; +import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH; +import static org.apache.hadoop.tools.DistCpConstants.CONF_LABEL_TARGET_WORK_PATH; import static org.apache.hadoop.tools.util.TestDistCpUtils.*; public class TestCopyCommitter { @@ -160,10 +162,10 @@ public void testPreserveStatus() throws IOException { context.setTargetPathExists(false); CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS); - Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong())); + Path listingFile = new Path("/tmp1/" + rand.nextLong()); listing.buildListing(listingFile, context); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); committer.commitJob(jobContext); checkDirectoryPermissions(fs, targetBase, sourcePerm); @@ -179,6 +181,45 @@ public void testPreserveStatus() throws IOException { } + @Test + public void testPreserveStatusWithAtomicCommit() throws IOException { + TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config); + JobContext jobContext = new JobContextImpl( + taskAttemptContext.getConfiguration(), + taskAttemptContext.getTaskAttemptID().getJobID()); + Configuration conf = jobContext.getConfiguration(); + String sourceBase; + String workBase; + String targetBase; + FileSystem fs = null; + try { + OutputCommitter committer = new CopyCommitter(null, taskAttemptContext); + fs = FileSystem.get(conf); + FsPermission sourcePerm = new FsPermission((short) 511); + FsPermission initialPerm = new FsPermission((short) 448); + sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm); + workBase = TestDistCpUtils.createTestSetup(fs, initialPerm); + targetBase = "/tmp1/" + rand.nextLong(); + final DistCpOptions options = new DistCpOptions.Builder( + Collections.singletonList(new Path(sourceBase)), new Path("/out")) + .preserve(FileAttribute.PERMISSION).build(); + options.appendToConf(conf); + final DistCpContext context = new DistCpContext(options); + context.setTargetPathExists(false); + CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS); + Path listingFile = new Path("/tmp1/" + rand.nextLong()); + listing.buildListing(listingFile, context); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_WORK_PATH, workBase); + conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true); + committer.commitJob(jobContext); + checkDirectoryPermissions(fs, targetBase, sourcePerm); + } finally { + TestDistCpUtils.delete(fs, "/tmp1"); + conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS); + } + } + @Test public void testDeleteMissing() throws IOException { TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config); @@ -207,8 +248,8 @@ public void testDeleteMissing() throws IOException { Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong())); listing.buildListing(listingFile, context); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase); - 
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); committer.commitJob(jobContext); verifyFoldersAreInSync(fs, targetBase, sourceBase); @@ -256,8 +297,8 @@ public void testPreserveTimeWithDeleteMiss() throws IOException { Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong())); listing.buildListing(listingFile, context); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase); - conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); Path sourceListing = new Path( conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH)); @@ -320,8 +361,8 @@ public void testDeleteMissingFlatInterleavedFiles() throws IOException { Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong())); listing.buildListing(listingFile, context); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase); - conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); committer.commitJob(jobContext); verifyFoldersAreInSync(fs, targetBase, sourceBase); @@ -353,8 +394,8 @@ public void testAtomicCommitMissingFinal() throws IOException { fs = FileSystem.get(conf); fs.mkdirs(new Path(workPath)); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath); - conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath); + conf.set(CONF_LABEL_TARGET_WORK_PATH, workPath); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, finalPath); conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true); assertPathExists(fs, "Work path", new Path(workPath)); @@ -391,8 +432,8 @@ public void testAtomicCommitExistingFinal() throws IOException { fs.mkdirs(new Path(workPath)); fs.mkdirs(new Path(finalPath)); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath); - conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath); + conf.set(CONF_LABEL_TARGET_WORK_PATH, workPath); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, finalPath); conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true); assertPathExists(fs, "Work path", new Path(workPath)); @@ -463,8 +504,8 @@ private void testCommitWithChecksumMismatch(boolean skipCrc) + String.valueOf(rand.nextLong())); listing.buildListing(listingFile, context); - conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase); - conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase); + conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase); OutputCommitter committer = new CopyCommitter( null, taskAttemptContext); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java index 4acb022786c63..f10dbf5573835 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java @@ -43,8 +43,9 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import 
java.util.EnumSet; @@ -66,6 +67,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.assertj.core.api.Assertions.assertThat; @@ -191,15 +193,95 @@ public void testPreserveDefaults() throws IOException { DistCpUtils.preserve(fs, dst, srcStatus, attributes, false); - CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst)); + assertStatusEqual(fs, dst, srcStatus); + } + + private void assertStatusEqual(final FileSystem fs, + final Path dst, + final CopyListingFileStatus srcStatus) throws IOException { + FileStatus destStatus = fs.getFileStatus(dst); + CopyListingFileStatus dstStatus = new CopyListingFileStatus( + destStatus); + + String text = String.format("Source %s; dest %s: wrong ", srcStatus, + destStatus); // FileStatus.equals only compares path field, must explicitly compare all fields - Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission())); - Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner())); - Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup())); - Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime()); - Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime()); - Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication()); + assertEquals(text + "permission", + srcStatus.getPermission(), dstStatus.getPermission()); + assertEquals(text + "owner", + srcStatus.getOwner(), dstStatus.getOwner()); + assertEquals(text + "group", + srcStatus.getGroup(), dstStatus.getGroup()); + assertEquals(text + "accessTime", + srcStatus.getAccessTime(), dstStatus.getAccessTime()); + assertEquals(text + "modificationTime", + srcStatus.getModificationTime(), dstStatus.getModificationTime()); + assertEquals(text + "replication", + srcStatus.getReplication(), dstStatus.getReplication()); + } + + private void assertStatusNotEqual(final FileSystem fs, + final Path dst, + final CopyListingFileStatus srcStatus) throws IOException { + FileStatus destStatus = fs.getFileStatus(dst); + CopyListingFileStatus dstStatus = new CopyListingFileStatus( + destStatus); + + String text = String.format("Source %s; dest %s: wrong ", + srcStatus, destStatus); + // FileStatus.equals only compares path field, + // must explicitly compare all fields + assertNotEquals(text + "permission", + srcStatus.getPermission(), dstStatus.getPermission()); + assertNotEquals(text + "owner", + srcStatus.getOwner(), dstStatus.getOwner()); + assertNotEquals(text + "group", + srcStatus.getGroup(), dstStatus.getGroup()); + assertNotEquals(text + "accessTime", + srcStatus.getAccessTime(), dstStatus.getAccessTime()); + assertNotEquals(text + "modificationTime", + srcStatus.getModificationTime(), dstStatus.getModificationTime()); + assertNotEquals(text + "replication", + srcStatus.getReplication(), dstStatus.getReplication()); + } + + + @Test + public void testSkipsNeedlessAttributes() throws Exception { + FileSystem fs = FileSystem.get(config); + + // preserve replication, block size, user, group, permission, + // checksum type and timestamps + + Path src = new Path("/tmp/testSkipsNeedlessAttributes/source"); + Path dst = new Path("/tmp/testSkipsNeedlessAttributes/dest"); + + // there is no need to actually create a source file, just a file + // status of one + CopyListingFileStatus srcStatus = 
new CopyListingFileStatus( + new FileStatus(0, false, 1, 32, 0, src)); + + // if an attribute is needed, preserve will fail to find the file + EnumSet attrs = EnumSet.of(FileAttribute.ACL, + FileAttribute.GROUP, + FileAttribute.PERMISSION, + FileAttribute.TIMES, + FileAttribute.XATTR); + for (FileAttribute attr : attrs) { + intercept(FileNotFoundException.class, () -> + DistCpUtils.preserve(fs, dst, srcStatus, + EnumSet.of(attr), + false)); + } + + // but with the preservation flags only used + // in file creation, this does not happen + DistCpUtils.preserve(fs, dst, srcStatus, + EnumSet.of( + FileAttribute.BLOCKSIZE, + FileAttribute.CHECKSUMTYPE), + false); } @Test @@ -258,16 +340,8 @@ public void testPreserveAclsforDefaultACL() throws IOException { // FileStatus.equals only compares path field, must explicitly compare all // fields - Assert.assertEquals("getPermission", srcStatus.getPermission(), - dstStatus.getPermission()); - Assert.assertEquals("Owner", srcStatus.getOwner(), dstStatus.getOwner()); - Assert.assertEquals("Group", srcStatus.getGroup(), dstStatus.getGroup()); - Assert.assertEquals("AccessTime", srcStatus.getAccessTime(), - dstStatus.getAccessTime()); - Assert.assertEquals("ModificationTime", srcStatus.getModificationTime(), - dstStatus.getModificationTime()); - Assert.assertEquals("Replication", srcStatus.getReplication(), - dstStatus.getReplication()); + assertStatusEqual(fs, dest, srcStatus); + Assert.assertArrayEquals(en1.toArray(), dd2.toArray()); } @@ -486,12 +560,7 @@ public void testPreserveNothingOnFile() throws IOException { CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst)); // FileStatus.equals only compares path field, must explicitly compare all fields - Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission())); - Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner())); - Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup())); - Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime()); - Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime()); - Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication()); + assertStatusNotEqual(fs, dst, srcStatus); } @Test @@ -842,13 +911,7 @@ public void testPreserveOnFileUpwardRecursion() throws IOException { // FileStatus.equals only compares path field, must explicitly compare all fields // attributes of src -> f2 ? should be yes - CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2)); - Assert.assertTrue(srcStatus.getPermission().equals(f2Status.getPermission())); - Assert.assertTrue(srcStatus.getOwner().equals(f2Status.getOwner())); - Assert.assertTrue(srcStatus.getGroup().equals(f2Status.getGroup())); - Assert.assertTrue(srcStatus.getAccessTime() == f2Status.getAccessTime()); - Assert.assertTrue(srcStatus.getModificationTime() == f2Status.getModificationTime()); - Assert.assertTrue(srcStatus.getReplication() == f2Status.getReplication()); + assertStatusEqual(fs, f2, srcStatus); // attributes of src -> f1 ? should be no CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1)); @@ -1047,13 +1110,7 @@ public void testPreserveOnFileDownwardRecursion() throws IOException { // FileStatus.equals only compares path field, must explicitly compare all fields // attributes of src -> f0 ? 
should be yes - CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0)); - Assert.assertTrue(srcStatus.getPermission().equals(f0Status.getPermission())); - Assert.assertTrue(srcStatus.getOwner().equals(f0Status.getOwner())); - Assert.assertTrue(srcStatus.getGroup().equals(f0Status.getGroup())); - Assert.assertTrue(srcStatus.getAccessTime() == f0Status.getAccessTime()); - Assert.assertTrue(srcStatus.getModificationTime() == f0Status.getModificationTime()); - Assert.assertTrue(srcStatus.getReplication() == f0Status.getReplication()); + assertStatusEqual(fs, f0, srcStatus); // attributes of src -> f1 ? should be no CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1)); diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml index 6ab4d8f96cafb..d92c63fdccd12 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-blockgen/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../../hadoop-project hadoop-dynamometer-blockgen - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Dynamometer Block Listing Generator Apache Hadoop Dynamometer Block Listing Generator jar diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-dist/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-dist/pom.xml index dbd23952dbc71..a905d87e54cc5 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-dist/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-dist/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../../hadoop-project-dist hadoop-dynamometer-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Dynamometer Dist Apache Hadoop Dynamometer Dist jar diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml index 0995c485af185..4b7fea4106b94 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../../hadoop-project hadoop-dynamometer-infra - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Dynamometer Cluster Simulator Apache Hadoop Dynamometer Cluster Simulator jar @@ -40,8 +40,8 @@ compile - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava compile diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AMOptions.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AMOptions.java index 5eb16e523463d..77f8c2ce57224 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AMOptions.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AMOptions.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.util.HashMap; import java.util.List; import java.util.Map; diff --git 
a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java index 490453f298633..6800bbe0ef814 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/AllowAllImpersonationProvider.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.tools.dynamometer; +import java.net.InetAddress; + import org.apache.hadoop.conf.Configured; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ImpersonationProvider; @@ -32,7 +34,7 @@ public void init(String configurationPrefix) { // Do nothing } - public void authorize(UserGroupInformation user, String remoteAddress) { + public void authorize(UserGroupInformation user, InetAddress remoteAddress) { // Do nothing } diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java index 015a5a692a012..094721b98d58a 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.tools.dynamometer; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; -import com.google.common.primitives.Ints; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java index 3c735dd1cfccd..3c8baec15c74f 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.tools.dynamometer; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.util.Optional; import java.util.function.Supplier; import 
org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java index bc127083a1da6..ee0810d6439d7 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index 056b7de70b870..174d9cc039e03 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.util.Optional; import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeUnit; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml index 25a1f91bc2cc7..9342a221f23d7 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../../hadoop-project hadoop-dynamometer-workload - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Dynamometer Workload Simulator Apache Hadoop Dynamometer Workload Simulator jar diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java index 33dc81d5a24a1..64b8dc28e67d0 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/CreateFileMapper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer.workloadgenerator; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.IOException; import java.io.OutputStream; import java.net.URI; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditLogDirectParser.java 
b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditLogDirectParser.java index 9a1aa243127e4..e649b69be73a7 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditLogDirectParser.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditLogDirectParser.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer.workloadgenerator.audit; -import com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; import java.io.IOException; import java.text.DateFormat; import java.text.ParseException; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java index 4dad215409c81..c46f720bd0a87 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayMapper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer.workloadgenerator.audit; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.util.Optional; import java.util.function.Function; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java index 274c5a763bd0f..14e8c9cb82f16 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.dynamometer.workloadgenerator.audit; -import com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; import org.apache.hadoop.tools.dynamometer.workloadgenerator.WorkloadDriver; import java.io.IOException; import java.net.URI; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/test/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/TestWorkloadGenerator.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/test/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/TestWorkloadGenerator.java index 0162352f08f60..f3edc24b70444 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/test/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/TestWorkloadGenerator.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/test/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/TestWorkloadGenerator.java @@ -24,6 +24,7 @@ import 
org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditLogHiveTableParser; import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper; import java.io.IOException; +import java.net.InetAddress; import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; @@ -115,7 +116,7 @@ public void init(String configurationPrefix) { // Do nothing } - public void authorize(UserGroupInformation user, String remoteAddress) + public void authorize(UserGroupInformation user, InetAddress remoteAddress) throws AuthorizationException { try { if (!user.getRealUser().getShortUserName() diff --git a/hadoop-tools/hadoop-dynamometer/pom.xml b/hadoop-tools/hadoop-dynamometer/pom.xml index dd96039295a2c..832f0aff63725 100644 --- a/hadoop-tools/hadoop-dynamometer/pom.xml +++ b/hadoop-tools/hadoop-dynamometer/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-dynamometer - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Dynamometer Apache Hadoop Dynamometer pom diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml index f93b0e15aa009..36f7242142446 100644 --- a/hadoop-tools/hadoop-extras/pom.xml +++ b/hadoop-tools/hadoop-extras/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-extras - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Extras Apache Hadoop Extras jar diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml index 6a8c43f1ecbf6..0a5d863b7d0e6 100644 --- a/hadoop-tools/hadoop-fs2img/pom.xml +++ b/hadoop-tools/hadoop-fs2img/pom.xml @@ -17,12 +17,12 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project org.apache.hadoop hadoop-fs2img - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Image Generation Tool Apache Hadoop Image Generation Tool jar @@ -87,10 +87,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java index cb9614058936f..9c8dc1f2304c1 100644 --- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java +++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java @@ -35,7 +35,7 @@ import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java index 1932f28e3d95b..ad3474c476dcb 100644 --- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java +++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/TreePath.java @@ -19,7 +19,7 @@ import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import 
org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml index b9e433bd08180..cde7ff18358ea 100644 --- a/hadoop-tools/hadoop-gridmix/pom.xml +++ b/hadoop-tools/hadoop-gridmix/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-gridmix - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Gridmix Apache Hadoop Gridmix jar @@ -123,10 +123,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java index bfbf516611110..2fb0becebf29b 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.ArrayList; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.gridmix.Progressive; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml index a227ad4efd029..d2f6ab409d0b5 100644 --- a/hadoop-tools/hadoop-kafka/pom.xml +++ b/hadoop-tools/hadoop-kafka/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-kafka - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Kafka Library support This module contains code to support integration with Kafka. 
@@ -39,10 +39,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true Max @@ -98,6 +97,10 @@ org.xerial.snappy snappy-java + + net.jpountz.lz4 + lz4 + diff --git a/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java b/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java index c83552b94d311..0856d0f4e0eeb 100644 --- a/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java +++ b/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.sink; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.commons.configuration2.SubsetConfiguration; diff --git a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java index 8d74bf247567f..fb19172359a5f 100644 --- a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java +++ b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.impl; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricType; diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml index 0236fbb897eb1..e6cfc667729e3 100644 --- a/hadoop-tools/hadoop-openstack/pom.xml +++ b/hadoop-tools/hadoop-openstack/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-openstack - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop OpenStack support This module contains code to support integration with OpenStack. 
@@ -66,10 +66,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml index bda7fb35bef85..51da009a14601 100644 --- a/hadoop-tools/hadoop-pipes/pom.xml +++ b/hadoop-tools/hadoop-pipes/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-pipes - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Pipes Apache Hadoop Pipes pom diff --git a/hadoop-tools/hadoop-resourceestimator/pom.xml b/hadoop-tools/hadoop-resourceestimator/pom.xml index bd82cc8c8ccc5..f6b7c6fb2f592 100644 --- a/hadoop-tools/hadoop-resourceestimator/pom.xml +++ b/hadoop-tools/hadoop-resourceestimator/pom.xml @@ -25,7 +25,7 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-resourceestimator diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml index d4b7d64d3bfb1..efad007b7d533 100644 --- a/hadoop-tools/hadoop-rumen/pom.xml +++ b/hadoop-tools/hadoop-rumen/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-rumen - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Rumen Apache Hadoop Rumen jar @@ -102,10 +102,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml index 848b8c509fd6f..57eb6c7b1e7fd 100644 --- a/hadoop-tools/hadoop-sls/pom.xml +++ b/hadoop-tools/hadoop-sls/pom.xml @@ -19,11 +19,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-sls - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Scheduler Load Simulator Apache Hadoop Scheduler Load Simulator jar @@ -108,10 +108,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java index 6f75bd17c6f0a..5bfa8dc021a34 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java @@ -813,7 +813,7 @@ private void increaseQueueAppNum(String queue) throws YarnException { if (appNum == null) { appNum = 1; } else { - appNum++; + appNum = appNum + 1; } queueAppNumMap.put(queueName, appNum); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/DAGAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/DAGAMSimulator.java index e44d47a72e472..f886a69e02496 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/DAGAMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/DAGAMSimulator.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.sls.appmaster; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java index d1a6245dd3f7c..2ec39762b8b99 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java @@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.DelayQueue; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 4cf8aef18ff0f..32567db666ef3 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -246,6 +246,13 @@ public boolean isUpdatedCapability() { @Override public void resetUpdatedCapability() { } + + @Override + public long calculateHeartBeatInterval( + long defaultInterval, long minInterval, long maxInterval, + float speedupFactor, float slowdownFactor) { + return defaultInterval; + } } public static RMNode newNodeInfo(String rackName, String hostName, diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java index 208629afdb45b..37bf96afa05ad 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java @@ -29,7 +29,6 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; -import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; @@ -82,36 +81,39 @@ private void setupAMRMToken(RMAppAttempt appAttempt) { @Override @SuppressWarnings("unchecked") public void handle(AMLauncherEvent event) { - if (AMLauncherEventType.LAUNCH == event.getType()) { - ApplicationId appId = - event.getAppAttempt().getAppAttemptId().getApplicationId(); - - // find AMSimulator - AMSimulator ams = appIdAMSim.get(appId); - if (ams != null) { - try { - Container amContainer = event.getAppAttempt().getMasterContainer(); - - setupAMRMToken(event.getAppAttempt()); - - // Notify RMAppAttempt to change state - super.context.getDispatcher().getEventHandler().handle( - new RMAppAttemptEvent(event.getAppAttempt().getAppAttemptId(), - RMAppAttemptEventType.LAUNCHED)); - - ams.notifyAMContainerLaunched( - 
event.getAppAttempt().getMasterContainer()); - LOG.info("Notify AM launcher launched:" + amContainer.getId()); - - se.getNmMap().get(amContainer.getNodeId()) - .addNewContainer(amContainer, 100000000L); - - return; - } catch (Exception e) { - throw new YarnRuntimeException(e); - } + ApplicationId appId = + event.getAppAttempt().getAppAttemptId().getApplicationId(); + // find AMSimulator + AMSimulator ams = appIdAMSim.get(appId); + if (ams == null) { + throw new YarnRuntimeException( + "Didn't find any AMSimulator for applicationId=" + appId); + } + Container amContainer = event.getAppAttempt().getMasterContainer(); + switch (event.getType()) { + case LAUNCH: + try { + setupAMRMToken(event.getAppAttempt()); + // Notify RMAppAttempt to change state + super.context.getDispatcher().getEventHandler().handle( + new RMAppAttemptEvent(event.getAppAttempt().getAppAttemptId(), + RMAppAttemptEventType.LAUNCHED)); + + ams.notifyAMContainerLaunched( + event.getAppAttempt().getMasterContainer()); + LOG.info("Notify AM launcher launched:" + amContainer.getId()); + + se.getNmMap().get(amContainer.getNodeId()) + .addNewContainer(amContainer, -1); + return; + } catch (Exception e) { + throw new YarnRuntimeException(e); } - + case CLEANUP: + se.getNmMap().get(amContainer.getNodeId()) + .cleanupContainer(amContainer.getId()); + break; + default: throw new YarnRuntimeException( "Didn't find any AMSimulator for applicationId=" + appId); } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java index a5aee7477c034..488ef2580e231 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.sls.scheduler; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index 750b708f89007..b5ae4f5b3c0ae 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -231,4 +231,11 @@ public boolean isUpdatedCapability() { @Override public void resetUpdatedCapability() { } + + @Override + public long calculateHeartBeatInterval( + long defaultInterval, long minInterval, long maxInterval, + float speedupFactor, float slowdownFactor) { + return defaultInterval; + } } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java index c7ba9eef62336..b3b7a8240383d 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.ObjectInputStream; 
+import java.nio.charset.StandardCharsets; import java.text.MessageFormat; import java.util.HashMap; import java.util.Map; @@ -94,11 +95,11 @@ public class SLSWebApp extends HttpServlet { ClassLoader cl = Thread.currentThread().getContextClassLoader(); try { simulateInfoTemplate = IOUtils.toString( - cl.getResourceAsStream("html/simulate.info.html.template")); + cl.getResourceAsStream("html/simulate.info.html.template"), StandardCharsets.UTF_8); simulateTemplate = IOUtils.toString( - cl.getResourceAsStream("html/simulate.html.template")); + cl.getResourceAsStream("html/simulate.html.template"), StandardCharsets.UTF_8); trackTemplate = IOUtils.toString( - cl.getResourceAsStream("html/track.html.template")); + cl.getResourceAsStream("html/track.html.template"), StandardCharsets.UTF_8); } catch (IOException e) { e.printStackTrace(); } diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java index 8f4a4135bba53..003417e6d21a2 100644 --- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.sls.nodemanager; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/web/TestSLSWebApp.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/web/TestSLSWebApp.java index c9be450fee290..c3f66a0b74d07 100644 --- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/web/TestSLSWebApp.java +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/web/TestSLSWebApp.java @@ -23,6 +23,7 @@ import org.junit.Test; import java.io.File; +import java.nio.charset.StandardCharsets; import java.text.MessageFormat; import java.util.HashSet; import java.util.Map; @@ -34,7 +35,7 @@ public class TestSLSWebApp { @Test public void testSimulateInfoPageHtmlTemplate() throws Exception { String simulateInfoTemplate = FileUtils.readFileToString( - new File("src/main/html/simulate.info.html.template")); + new File("src/main/html/simulate.info.html.template"), StandardCharsets.UTF_8); Map simulateInfoMap = new HashMap<>(); simulateInfoMap.put("Number of racks", 10); @@ -72,7 +73,7 @@ public void testSimulateInfoPageHtmlTemplate() throws Exception { @Test public void testSimulatePageHtmlTemplate() throws Exception { String simulateTemplate = FileUtils.readFileToString( - new File("src/main/html/simulate.html.template")); + new File("src/main/html/simulate.html.template"), StandardCharsets.UTF_8); Set queues = new HashSet(); queues.add("sls_queue_1"); @@ -96,7 +97,7 @@ public void testSimulatePageHtmlTemplate() throws Exception { @Test public void testTrackPageHtmlTemplate() throws Exception { String trackTemplate = FileUtils.readFileToString( - new File("src/main/html/track.html.template")); + new File("src/main/html/track.html.template"), StandardCharsets.UTF_8); String trackedQueueInfo = ""; Set trackedQueues = new HashSet(); trackedQueues.add("sls_queue_1"); diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml index 9b379ab243a9a..def7b232ddd7c 
100644 --- a/hadoop-tools/hadoop-streaming/pom.xml +++ b/hadoop-tools/hadoop-streaming/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-streaming - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop MapReduce Streaming Apache Hadoop MapReduce Streaming jar @@ -129,10 +129,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${basedir}/dev-support/findbugs-exclude.xml Max diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java index 0ef1ff0b32872..5a4e3a960d2be 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java @@ -31,13 +31,13 @@ import java.util.Properties; import java.util.StringTokenizer; +import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.ClusterMapReduceTestCase; import org.apache.hadoop.mapred.Counters; -import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.SkipBadRecords; import org.apache.hadoop.mapred.Utils; @@ -65,7 +65,12 @@ public class TestStreamingBadRecords extends ClusterMapReduceTestCase private static final String badReducer = UtilTest.makeJavaCommand(BadApp.class, new String[]{"true"}); private static final int INPUTSIZE=100; - + + @BeforeClass + public static void setupClass() throws Exception { + setupClassBase(TestStreamingBadRecords.class); + } + public TestStreamingBadRecords() throws IOException { UtilTest utilTest = new UtilTest(getClass().getName()); diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 708c04e6cc19f..09585f06de9cb 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project-dist hadoop-tools-dist - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Tools Dist Apache Hadoop Tools Dist jar diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 31b26ac6954a7..547cd3a512763 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -20,11 +20,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-tools - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Tools Apache Hadoop Tools pom diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg index 0678acc7198e6..587855d188180 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg +++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg @@ -8,15 +8,15 @@ feature.tc.enabled=false #[docker] # module.enabled=## enable/disable the module. 
set to "true" to enable, disabled by default # docker.binary=/usr/bin/docker -# docker.allowed.capabilities=## comma seperated capabilities that can be granted, e.g CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE -# docker.allowed.devices=## comma seperated list of devices that can be mounted into a container -# docker.allowed.networks=## comma seperated networks that can be used. e.g bridge,host,none -# docker.allowed.ro-mounts=## comma seperated volumes that can be mounted as read-only -# docker.allowed.rw-mounts=## comma seperate volumes that can be mounted as read-write, add the yarn local and log dirs to this list to run Hadoop jobs +# docker.allowed.capabilities=## comma separated capabilities that can be granted, e.g CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE +# docker.allowed.devices=## comma separated list of devices that can be mounted into a container +# docker.allowed.networks=## comma separated networks that can be used. e.g bridge,host,none +# docker.allowed.ro-mounts=## comma separated volumes that can be mounted as read-only +# docker.allowed.rw-mounts=## comma separate volumes that can be mounted as read-write, add the yarn local and log dirs to this list to run Hadoop jobs # docker.privileged-containers.enabled=false -# docker.allowed.volume-drivers=## comma seperated list of allowed volume-drivers +# docker.allowed.volume-drivers=## comma separated list of allowed volume-drivers # docker.no-new-privileges.enabled=## enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default -# docker.allowed.runtimes=## comma seperated runtimes that can be used. +# docker.allowed.runtimes=## comma separated runtimes that can be used. # The configs below deal with settings for FPGA resource #[fpga] diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 3a37293357a01..95706f9e1360c 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -705,4 +705,10 @@ + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.2.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.2.2.xml new file mode 100644 index 0000000000000..74a0a25c9bad0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.2.2.xml @@ -0,0 +1,3006 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + In secure mode, YARN verifies access to the application, queue + etc. before accepting the request. +

    + If the user does not have VIEW_APP access then the following + fields in the report will be set to stubbed values: +

      +
    • host - set to "N/A"
    • +
    • RPC port - set to -1
    • +
    • client token - set to "N/A"
    • +
    • diagnostics - set to "N/A"
    • +
    • tracking URL - set to "N/A"
    • +
    • original tracking URL - set to "N/A"
    • +
    • resource usage report - all values are -1
    • +
    + + @param appId + {@link ApplicationId} of the application that needs a report + @return application report + @throws YarnException + @throws IOException]]> + + + + + + + + Get a report (ApplicationReport) of all Applications in the cluster. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @return a list of reports for all applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report of the given ApplicationAttempt. +

    + +

    + In secure mode, YARN verifies access to the application, queue + etc. before accepting the request. +

    + + @param applicationAttemptId + {@link ApplicationAttemptId} of the application attempt that needs + a report + @return application attempt report + @throws YarnException + @throws ApplicationAttemptNotFoundException if application attempt + not found + @throws IOException]]> +
    +
    + + + + + + + Get a report of all (ApplicationAttempts) of Application in the cluster. +

    + + @param applicationId + @return a list of reports for all application attempts for specified + application + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report of the given Container. +

    + +

    + In secure mode, YARN verifies access to the application, queue + etc. before accepting the request. +

    + + @param containerId + {@link ContainerId} of the container that needs a report + @return container report + @throws YarnException + @throws ContainerNotFoundException if container not found + @throws IOException]]> +
    +
    + + + + + + + Get a report of all (Containers) of ApplicationAttempt in the cluster. +

    + + @param applicationAttemptId + @return a list of reports of all containers for specified application + attempt + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + + + {@code + AMRMClient.createAMRMClientContainerRequest() + } + @return the newly create AMRMClient instance.]]> + + + + + + + + + + + + + + + + RegisterApplicationMasterResponse + @throws YarnException + @throws IOException]]> + + + + + + + + + + + RegisterApplicationMasterResponse + @throws YarnException + @throws IOException]]> + + + + + + + + addContainerRequest are sent to the + ResourceManager. New containers assigned to the master are + retrieved. Status of completed containers and node health updates are also + retrieved. This also doubles up as a heartbeat to the ResourceManager and + must be made periodically. The call may not always return any new + allocations of containers. App should not make concurrent allocate + requests. May cause request loss. + +

    + Note : If the user has not removed container requests that have already + been satisfied, then the re-register may end up sending the entire + container requests to the RM (including matched requests). Which would mean + the RM could end up giving it a lot of new allocated containers. +

    + + @param progressIndicator Indicates progress made by the master + @return the response of the allocate request + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + + + + + + + allocate + @param req Resource request]]> + + + + + + + + + + + + + allocate. + Any previous pending resource change request of the same container will be + removed. + + Application that calls this method is expected to maintain the + Containers that are returned from previous successful + allocations or resource changes. By passing in the existing container and a + target resource capability to this method, the application requests the + ResourceManager to change the existing resource allocation to the target + resource allocation. + + @deprecated use + {@link #requestContainerUpdate(Container, UpdateContainerRequest)} + + @param container The container returned from the last successful resource + allocation or resource change + @param capability The target resource capability of the container]]> + + + + + + + allocate. + Any previous pending update request of the same container will be + removed. + + @param container The container returned from the last successful resource + allocation or update + @param updateContainerRequest The UpdateContainerRequest.]]> + + + + + + + + + + + + + + + + + + + + + + + + ContainerRequests matching the given + parameters. These ContainerRequests should have been added via + addContainerRequest earlier in the lifecycle. For performance, + the AMRMClient may return its internal collection directly without creating + a copy. Users should not perform mutable operations on the return value. + Each collection in the list contains requests with identical + Resource size that fit in the given capability. In a + collection, requests will be returned in the same order as they were added. + + NOTE: This API only matches Container requests that were created by the + client WITHOUT the allocationRequestId being set. + + @return Collection of request matching the parameters]]> + + + + + + + + + ContainerRequests matching the given + parameters. These ContainerRequests should have been added via + addContainerRequest earlier in the lifecycle. For performance, + the AMRMClient may return its internal collection directly without creating + a copy. Users should not perform mutable operations on the return value. + Each collection in the list contains requests with identical + Resource size that fit in the given capability. In a + collection, requests will be returned in the same order as they were added. + specify an ExecutionType. + + NOTE: This API only matches Container requests that were created by the + client WITHOUT the allocationRequestId being set. + + @param priority Priority + @param resourceName Location + @param executionType ExecutionType + @param capability Capability + @return Collection of request matching the parameters]]> + + + + + + + + + + + + + ContainerRequests matching the given + allocationRequestId. These ContainerRequests should have been added via + addContainerRequest earlier in the lifecycle. For performance, + the AMRMClient may return its internal collection directly without creating + a copy. Users should not perform mutable operations on the return value. + + NOTE: This API only matches Container requests that were created by the + client WITH the allocationRequestId being set to a non-default value. + + @param allocationRequestId Allocation Request Id + @return Collection of request matching the parameters]]> + + + + + + + + + + + + + AMRMClient. This cache must + be shared with the {@link NMClient} used to manage containers for the + AMRMClient +

    + If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} + singleton instance will be used. + + @param nmTokenCache the NM token cache to use.]]> + + + + + AMRMClient. This cache must be + shared with the {@link NMClient} used to manage containers for the + AMRMClient. +

    + If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} + singleton instance will be used. + + @return the NM token cache.]]> + + + + + + + + + + + + + + + + + + + + + + + + check to return true for each 1000 ms. + See also {@link #waitFor(java.util.function.Supplier, int)} + and {@link #waitFor(java.util.function.Supplier, int, int)} + @param check the condition for which it should wait]]> + + + + + + + + check to return true for each + checkEveryMillis ms. + See also {@link #waitFor(java.util.function.Supplier, int, int)} + @param check user defined checker + @param checkEveryMillis interval to call check]]> + + + + + + + + + check to return true for each + checkEveryMillis ms. In the main loop, this method will log + the message "waiting in main loop" for each logInterval times + iteration to confirm the thread is alive. + @param check user defined checker + @param checkEveryMillis interval to call check + @param logInterval interval to log for each]]> + + + + + + + + + + + + + + + + + + + + + + + + + + Start an allocated container.

    + +

    The ApplicationMaster or other applications that use the + client must provide the details of the allocated container, including the + Id, the assigned node's Id and the token via {@link Container}. In + addition, the AM needs to provide the {@link ContainerLaunchContext} as + well.

    + + @param container the allocated container + @param containerLaunchContext the context information needed by the + NodeManager to launch the + container + @return a map between the auxiliary service names and their outputs + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + Increase the resource of a container.

    + +

    The ApplicationMaster or other applications that use the + client must provide the details of the container, including the Id and + the target resource encapsulated in the updated container token via + {@link Container}. +

    + + @param container the container with updated token. + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + Update the resources of a container.

    + +

    The ApplicationMaster or other applications that use the + client must provide the details of the container, including the Id and + the target resource encapsulated in the updated container token via + {@link Container}. +

    + + @param container the container with updated token. + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + + Stop an started container.

    + + @param containerId the Id of the started container + @param nodeId the Id of the NodeManager + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + + Query the status of a container.

    + + @param containerId the Id of the started container + @param nodeId the Id of the NodeManager + + @return the status of a container. + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + + + Re-Initialize the Container.

    + + @param containerId the Id of the container to Re-Initialize. + @param containerLaunchContex the updated ContainerLaunchContext. + @param autoCommit commit re-initialization automatically ? + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + Restart the specified container.

    + + @param containerId the Id of the container to restart. + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + Rollback last reInitialization of the specified container.

    + + @param containerId the Id of the container to restart. + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + + + Commit last reInitialization of the specified container.

    + + @param containerId the Id of the container to commit reInitialize. + + @throws YarnException YarnException. + @throws IOException IOException.]]> +
    +
    + + + + Set whether the containers that are started by this client, and are + still running should be stopped when the client stops. By default, the + feature should be enabled.

    However, containers will be stopped only + when service is stopped. i.e. after {@link NMClient#stop()}. + + @param enabled whether the feature is enabled or not]]> +
    +
    + + + + NMClient. This cache must be + shared with the {@link AMRMClient} that requested the containers managed + by this NMClient +

    + If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} + singleton instance will be used. + + @param nmTokenCache the NM token cache to use.]]> + + + + + NMClient. This cache must be + shared with the {@link AMRMClient} that requested the containers managed + by this NMClient +

    + If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} + singleton instance will be used. + + @return the NM token cache]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + By default YARN client libraries {@link AMRMClient} and {@link NMClient} use + {@link #getSingleton()} instance of the cache. +

      +
    • + Using the singleton instance of the cache is appropriate when running a + single ApplicationMaster in the same JVM. +
    • +
    • + When using the singleton, users don't need to do anything special, + {@link AMRMClient} and {@link NMClient} are already set up to use the + default singleton {@link NMTokenCache} +
    • +
    + If running multiple Application Masters in the same JVM, a different cache + instance should be used for each Application Master. +
      +
    • + If using the {@link AMRMClient} and the {@link NMClient}, setting up + and using an instance cache is as follows: +
      +   NMTokenCache nmTokenCache = new NMTokenCache();
      +   AMRMClient rmClient = AMRMClient.createAMRMClient();
      +   NMClient nmClient = NMClient.createNMClient();
      +   nmClient.setNMTokenCache(nmTokenCache);
      +   ...
      + 
      +
    • +
    • + If using the {@link AMRMClientAsync} and the {@link NMClientAsync}, + setting up and using an instance cache is as follows: +
      +   NMTokenCache nmTokenCache = new NMTokenCache();
      +   AMRMClient rmClient = AMRMClient.createAMRMClient();
      +   NMClient nmClient = NMClient.createNMClient();
      +   nmClient.setNMTokenCache(nmTokenCache);
      +   AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
      +   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
      +   ...
      + 
      +
    • +
    • + If using {@link ApplicationMasterProtocol} and + {@link ContainerManagementProtocol} directly, setting up and using an + instance cache is as follows: +
      +   NMTokenCache nmTokenCache = new NMTokenCache();
      +   ...
      +   ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
      +   ...
      +   AllocateRequest allocateRequest = ...
      +   ...
      +   AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
      +   for (NMToken token : allocateResponse.getNMTokens()) {
      +     nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
      +   }
      +   ...
      +   ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
      +   ...
      +   nmPro.startContainer(container, containerContext);
      +   ...
      + 
      +
    • +
    + It is also possible to mix the usage of a client ({@code AMRMClient} or + {@code NMClient}, or the async versions of them) with a protocol proxy + ({@code ContainerManagementProtocolProxy} or + {@code ApplicationMasterProtocol}).]]> +
    +
    + + + + + + + + + + + + + + The method to claim a resource with the SharedCacheManager. + The client uses a checksum to identify the resource and an + {@link ApplicationId} to identify which application will be using the + resource. +

    + +

    + The SharedCacheManager responds with whether or not the + resource exists in the cache. If the resource exists, a URL to + the resource in the shared cache is returned. If the resource does not + exist, null is returned instead. +

    + +

    + Once a URL has been returned for a resource, that URL is safe to use for + the lifetime of the application that corresponds to the provided + ApplicationId. +

    + + @param applicationId ApplicationId of the application using the resource + @param resourceKey the key (i.e. checksum) that identifies the resource + @return URL to the resource, or null if it does not exist]]> +
    +
    + + + + + + + The method to release a resource with the SharedCacheManager. + This method is called once an application is no longer using a claimed + resource in the shared cache. The client uses a checksum to identify the + resource and an {@link ApplicationId} to identify which application is + releasing the resource. +

    + +

    + Note: This method is an optimization and the client is not required to call + it for correctness. +

    + + @param applicationId ApplicationId of the application releasing the + resource + @param resourceKey the key (i.e. checksum) that identifies the resource]]> +
    +
    + + + + + + + + + + +
    + + + + + + + + + + + + + + + + Obtain a {@link YarnClientApplication} for a new application, + which in turn contains the {@link ApplicationSubmissionContext} and + {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse} + objects. +

    + + @return {@link YarnClientApplication} built for a new application + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Submit a new application to YARN. It is a blocking call - it + will not return {@link ApplicationId} until the submitted application is + submitted successfully and accepted by the ResourceManager. +

    + +

    + Users should provide an {@link ApplicationId} as part of the parameter + {@link ApplicationSubmissionContext} when submitting a new application, + otherwise it will throw the {@link ApplicationIdNotProvidedException}. +

    + +

    This internally calls {@link ApplicationClientProtocol#submitApplication + (SubmitApplicationRequest)}, and after that, it internally invokes + {@link ApplicationClientProtocol#getApplicationReport + (GetApplicationReportRequest)} and waits till it can make sure that the + application gets properly submitted. If RM fails over or RM restart + happens before ResourceManager saves the application's state, + {@link ApplicationClientProtocol + #getApplicationReport(GetApplicationReportRequest)} will throw + the {@link ApplicationNotFoundException}. This API automatically resubmits + the application with the same {@link ApplicationSubmissionContext} when it + catches the {@link ApplicationNotFoundException}

    + + @param appContext + {@link ApplicationSubmissionContext} containing all the details + needed to submit a new application + @return {@link ApplicationId} of the accepted application + @throws YarnException + @throws IOException + @see #createApplication()]]> +
    +
    + + + + + + + Fail an application attempt identified by given ID. +

    + + @param applicationAttemptId + {@link ApplicationAttemptId} of the attempt to fail. + @throws YarnException + in case of errors or if YARN rejects the request due to + access-control restrictions. + @throws IOException + @see #getQueueAclsInfo()]]> +
    +
    + + + + + + + Kill an application identified by given ID. +

    + + @param applicationId + {@link ApplicationId} of the application that needs to be killed + @throws YarnException + in case of errors or if YARN rejects the request due to + access-control restrictions. + @throws IOException + @see #getQueueAclsInfo()]]> +
    +
    + + + + + + + + Kill an application identified by given ID. +

    + @param applicationId {@link ApplicationId} of the application that needs to + be killed + @param diagnostics for killing an application. + @throws YarnException in case of errors or if YARN rejects the request due + to access-control restrictions. + @throws IOException]]> +
    +
    + + + + + + + Get a report of the given Application. +

    + +

    + In secure mode, YARN verifies access to the application, queue + etc. before accepting the request. +

    + +

    + If the user does not have VIEW_APP access then the following + fields in the report will be set to stubbed values: +

      +
    • host - set to "N/A"
    • +
    • RPC port - set to -1
    • +
    • client token - set to "N/A"
    • +
    • diagnostics - set to "N/A"
    • +
    • tracking URL - set to "N/A"
    • +
    • original tracking URL - set to "N/A"
    • +
    • resource usage report - all values are -1
    • +
    + + @param appId + {@link ApplicationId} of the application that needs a report + @return application report + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + The AMRM token is required for AM to RM scheduling operations. For + managed Application Masters YARN takes care of injecting it. For unmanaged + Applications Masters, the token must be obtained via this method and set + in the {@link org.apache.hadoop.security.UserGroupInformation} of the + current user. +

    + The AMRM token will be returned only if all the following conditions are + met: +

      +
    • the requester is the owner of the ApplicationMaster
    • +
    • the application master is an unmanaged ApplicationMaster
    • +
    • the application master is in ACCEPTED state
    • +
    + Else this method returns NULL. + + @param appId {@link ApplicationId} of the application to get the AMRM token + @return the AMRM token if available + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + Get a report (ApplicationReport) of all Applications in the cluster. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @return a list of reports of all running applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report (ApplicationReport) of Applications + matching the given application types in the cluster. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @param applicationTypes set of application types you are interested in + @return a list of reports of applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report (ApplicationReport) of Applications matching the given + application states in the cluster. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @param applicationStates set of application states you are interested in + @return a list of reports of applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + Get a report (ApplicationReport) of Applications matching the given + application types and application states in the cluster. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @param applicationTypes set of application types you are interested in + @param applicationStates set of application states you are interested in + @return a list of reports of applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + + Get a report (ApplicationReport) of Applications matching the given + application types, application states and application tags in the cluster. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @param applicationTypes set of application types you are interested in + @param applicationStates set of application states you are interested in + @param applicationTags set of application tags you are interested in + @return a list of reports of applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + + + Get a report (ApplicationReport) of Applications matching the given users, + queues, application types and application states in the cluster. If any of + the params is set to null, it is not used when filtering. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @param queues set of queues you are interested in + @param users set of users you are interested in + @param applicationTypes set of application types you are interested in + @param applicationStates set of application states you are interested in + @return a list of reports of applications + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a list of ApplicationReports that match the given + {@link GetApplicationsRequest}. +

    + +

    + If the user does not have VIEW_APP access for an application + then the corresponding report will be filtered as described in + {@link #getApplicationReport(ApplicationId)}. +

    + + @param request the request object to get the list of applications. + @return The list of ApplicationReports that match the request + @throws YarnException Exception specific to YARN. + @throws IOException Exception mostly related to connection errors.]]> +
    +
    + + + + + + Get metrics ({@link YarnClusterMetrics}) about the cluster. +

    + + @return cluster metrics + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report of nodes ({@link NodeReport}) in the cluster. +

    + + @param states The {@link NodeState}s to filter on. If no filter states are + given, nodes in all states will be returned. + @return A list of node reports + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a delegation token so as to be able to talk to YARN using those tokens. + + @param renewer + Address of the renewer who can renew these tokens when needed by + securely talking to YARN. + @return a delegation token ({@link Token}) that can be used to + talk to YARN + @throws YarnException + @throws IOException]]> + + + + + + + + + Get information ({@link QueueInfo}) about a given queue. +

    + + @param queueName + Name of the queue whose information is needed + @return queue information + @throws YarnException + in case of errors or if YARN rejects the request due to + access-control restrictions. + @throws IOException]]> +
    +
    + + + + + + Get information ({@link QueueInfo}) about all queues, recursively if there + is a hierarchy +

    + + @return a list of queue-information for all queues + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + Get information ({@link QueueInfo}) about top level queues. +

    + + @return a list of queue-information for all the top-level queues + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get information ({@link QueueInfo}) about all the immediate children queues + of the given queue +

    + + @param parent + Name of the queue whose child-queues' information is needed + @return a list of queue-information for all queues who are direct children + of the given parent queue. + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + Get information about acls for current user on all the + existing queues. +

    + + @return a list of queue acls ({@link QueueUserACLInfo}) for + current user + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report of the given ApplicationAttempt. +

    + +

    + In secure mode, YARN verifies access to the application, queue + etc. before accepting the request. +

    + + @param applicationAttemptId + {@link ApplicationAttemptId} of the application attempt that needs + a report + @return application attempt report + @throws YarnException + @throws ApplicationAttemptNotFoundException if application attempt + not found + @throws IOException]]> +
    +
    + + + + + + + Get a report of all (ApplicationAttempts) of Application in the cluster. +

    + + @param applicationId application id of the app + @return a list of reports for all application attempts for specified + application. + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + Get a report of the given Container. +

    + +

    + In secure mode, YARN verifies access to the application, queue + etc. before accepting the request. +

    + + @param containerId + {@link ContainerId} of the container that needs a report + @return container report + @throws YarnException + @throws ContainerNotFoundException if container not found. + @throws IOException]]> +
    +
    + + + + + + + Get a report of all (Containers) of ApplicationAttempt in the cluster. +

    + + @param applicationAttemptId application attempt id + @return a list of reports of all containers for specified application + attempts + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + Attempts to move the given application to the given queue. +

    + + @param appId + Application to move. + @param queue + Queue to place it in to. + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + Obtain a {@link GetNewReservationResponse} for a new reservation, + which contains the {@link ReservationId} object. +

    + + @return The {@link GetNewReservationResponse} containing a new + {@link ReservationId} object. + @throws YarnException if reservation cannot be created. + @throws IOException if reservation cannot be created.]]> +
    +
    + + + + + + + The interface used by clients to submit a new reservation to the + {@code ResourceManager}. +

    + +

    + The client packages all details of its request in a + {@link ReservationSubmissionRequest} object. This contains information + about the amount of capacity, temporal constraints, and gang needs. + Furthermore, the reservation might be composed of multiple stages, with + ordering dependencies among them. +

    + +

    + In order to respond, a new admission control component in the + {@code ResourceManager} performs an analysis of the resources that have + been committed over the period of time the user is requesting, verify that + the user requests can be fulfilled, and that it respect a sharing policy + (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined + that the ReservationRequest is satisfiable the {@code ResourceManager} + answers with a {@link ReservationSubmissionResponse} that includes a + {@link ReservationId}. Upon failure to find a valid allocation the response + is an exception with the message detailing the reason of failure. +

    + +

    + The semantics guarantees that the {@link ReservationId} returned, + corresponds to a valid reservation existing in the time-range request by + the user. The amount of capacity dedicated to such reservation can vary + overtime, depending of the allocation that has been determined. But it is + guaranteed to satisfy all the constraint expressed by the user in the + {@link ReservationDefinition} +

    + + @param request request to submit a new Reservation + @return response contains the {@link ReservationId} on accepting the + submission + @throws YarnException if the reservation cannot be created successfully + @throws IOException]]> +
    +
    + + + + + + + The interface used by clients to update an existing Reservation. This is + referred to as a re-negotiation process, in which a user that has + previously submitted a Reservation. +

    + +

    + The allocation is attempted by virtually substituting all previous + allocations related to this Reservation with new ones, that satisfy the new + {@link ReservationDefinition}. Upon success the previous allocation is + atomically substituted by the new one, and on failure (i.e., if the system + cannot find a valid allocation for the updated request), the previous + allocation remains valid. +

    + + @param request to update an existing Reservation (the + {@link ReservationUpdateRequest} should refer to an existing valid + {@link ReservationId}) + @return response empty on successfully updating the existing reservation + @throws YarnException if the request is invalid or reservation cannot be + updated successfully + @throws IOException]]> +
    +
    + + + + + + + The interface used by clients to remove an existing Reservation. +

    + + @param request to remove an existing Reservation (the + {@link ReservationDeleteRequest} should refer to an existing valid + {@link ReservationId}) + @return response empty on successfully deleting the existing reservation + @throws YarnException if the request is invalid or reservation cannot be + deleted successfully + @throws IOException]]> +
    +
    + + + + + + + The interface used by clients to get the list of reservations in a plan. + The reservationId will be used to search for reservations to list if it is + provided. Otherwise, it will select active reservations within the + startTime and endTime (inclusive). +

    + + @param request to list reservations in a plan. Contains fields to select + String queue, ReservationId reservationId, long startTime, + long endTime, and a bool includeReservationAllocations. + + queue: Required. Cannot be null or empty. Refers to the + reservable queue in the scheduler that was selected when + creating a reservation submission + {@link ReservationSubmissionRequest}. + + reservationId: Optional. If provided, other fields will + be ignored. + + startTime: Optional. If provided, only reservations that + end after the startTime will be selected. This defaults + to 0 if an invalid number is used. + + endTime: Optional. If provided, only reservations that + start on or before endTime will be selected. This defaults + to Long.MAX_VALUE if an invalid number is used. + + includeReservationAllocations: Optional. Flag that + determines whether the entire reservation allocations are + to be returned. Reservation allocations are subject to + change in the event of re-planning as described by + {@link ReservationDefinition}. + + @return response that contains information about reservations that are + being searched for. + @throws YarnException if the request is invalid + @throws IOException if the request failed otherwise]]> +
    +
    + + + + + + The interface used by client to get node to labels mappings in existing cluster +

    + + @return node to labels mappings + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + The interface used by client to get labels to nodes mapping + in existing cluster +

    + + @return node to labels mappings + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + The interface used by client to get labels to nodes mapping + for specified labels in existing cluster +

    + + @param labels labels for which labels to nodes mapping has to be retrieved + @return labels to nodes mappings for specific labels + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + The interface used by client to get node labels in the cluster +

    + + @return cluster node labels collection + @throws YarnException when there is a failure in + {@link ApplicationClientProtocol} + @throws IOException when there is a failure in + {@link ApplicationClientProtocol}]]> +
    +
    + + + + + + + + The interface used by client to set priority of an application +

    + @param applicationId + @param priority + @return updated priority of an application. + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + Signal a container identified by given ID. +

    + + @param containerId + {@link ContainerId} of the container that needs to be signaled + @param command the signal container command + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + + + + + Get the resource profiles available in the RM. +

    + @return a Map of the resource profile names to their capabilities + @throws YARNFeatureNotEnabledException if resource-profile is disabled + @throws YarnException if any error happens inside YARN + @throws IOException in case of other errors]]> +
    +
    + + + + + + + Get the details of a specific resource profile from the RM. +

    + @param profile the profile name + @return resource profile name with its capabilities + @throws YARNFeatureNotEnabledException if resource-profile is disabled + @throws YarnException if any error happens inside YARN + @throws IOException in case of other others]]> +
    +
    + + + + + + Get available resource types supported by RM. +

    + @return list of supported resource types with detailed information + @throws YarnException if any issue happens inside YARN + @throws IOException in case of other others]]> +
    +
    + + + + + + The interface used by client to get node attributes in the cluster. +

    + + @return cluster node attributes collection + @throws YarnException when there is a failure in + {@link ApplicationClientProtocol} + @throws IOException when there is a failure in + {@link ApplicationClientProtocol}]]> +
    +
    + + + + + + + The interface used by client to get mapping of AttributeKey to associated + NodeToAttributeValue list for specified node attributeKeys in the cluster. +

    + + @param attributes AttributeKeys for which associated NodeToAttributeValue + mapping value has to be retrieved. If empty or null is set then + will return mapping for all attributeKeys in the cluster + @return mapping of AttributeKey to List of associated + NodeToAttributeValue's. + @throws YarnException + @throws IOException]]> +
    +
    + + + + + + + The interface used by client to get all node to attribute mapping in + existing cluster. +

    + + @param hostNames HostNames for which host to attributes mapping has to + be retrived.If empty or null is set then will return + all nodes to attributes mapping in cluster. + @return Node to attribute mappings + @throws YarnException + @throws IOException]]> +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + Create a new instance of AMRMClientAsync.

    + + @param intervalMs heartbeat interval in milliseconds between AM and RM + @param callbackHandler callback handler that processes responses from + the ResourceManager]]> +
    +
    + + + + + + Create a new instance of AMRMClientAsync.

    + + @param client the AMRMClient instance + @param intervalMs heartbeat interval in milliseconds between AM and RM + @param callbackHandler callback handler that processes responses from + the ResourceManager]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + RegisterApplicationMasterResponse + @throws YarnException + @throws IOException]]> + + + + + + + + + + + + + + + + allocate + @param req Resource request]]> + + + + + + + + + + + + + allocate. + Any previous pending resource change request of the same container will be + removed. + + Application that calls this method is expected to maintain the + Containers that are returned from previous successful + allocations or resource changes. By passing in the existing container and a + target resource capability to this method, the application requests the + ResourceManager to change the existing resource allocation to the target + resource allocation. + + @deprecated use + {@link #requestContainerUpdate(Container, UpdateContainerRequest)} + + @param container The container returned from the last successful resource + allocation or resource change + @param capability The target resource capability of the container]]> + + + + + + + allocate. + Any previous pending update request of the same container will be + removed. + + @param container The container returned from the last successful resource + allocation or update + @param updateContainerRequest The UpdateContainerRequest.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + check to return true for each 1000 ms. + See also {@link #waitFor(java.util.function.Supplier, int)} + and {@link #waitFor(java.util.function.Supplier, int, int)} + @param check the condition for which it should wait]]> + + + + + + + + check to return true for each + checkEveryMillis ms. + See also {@link #waitFor(java.util.function.Supplier, int, int)} + @param check user defined checker + @param checkEveryMillis interval to call check]]> + + + + + + + + + check to return true for each + checkEveryMillis ms. In the main loop, this method will log + the message "waiting in main loop" for each logInterval times + iteration to confirm the thread is alive. + @param check user defined checker + @param checkEveryMillis interval to call check + @param logInterval interval to log for each]]> + + + + + + + + + + AMRMClientAsync handles communication with the ResourceManager + and provides asynchronous updates on events such as container allocations and + completions. It contains a thread that sends periodic heartbeats to the + ResourceManager. + + It should be used by implementing a CallbackHandler: +
    + {@code
    + class MyCallbackHandler extends AMRMClientAsync.AbstractCallbackHandler {
+   public void onContainersAllocated(List<Container> containers) {
    +     [run tasks on the containers]
    +   }
    +
+   public void onContainersUpdated(List<UpdatedContainer> containers) {
+     [determine if the resource allocation of the containers has been increased
+      in the ResourceManager, and if so, inform the NodeManagers to increase the
+      resource monitoring/enforcement on the containers]
    +   }
    +
+   public void onContainersCompleted(List<ContainerStatus> statuses) {
    +     [update progress, check whether app is done]
    +   }
    +   
+   public void onNodesUpdated(List<NodeReport> updated) {}
    +   
    +   public void onReboot() {}
    + }
    + }
    + 
    + + The client's lifecycle should be managed similarly to the following: + +
    + {@code
    + AMRMClientAsync asyncClient = 
+     createAMRMClientAsync(appAttId, 1000, new MyCallbackHandler());
    + asyncClient.init(conf);
    + asyncClient.start();
    + RegisterApplicationMasterResponse response = asyncClient
    +    .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
    +       appMasterTrackingUrl);
    + asyncClient.addContainerRequest(containerRequest);
    + [... wait for application to complete]
    + asyncClient.unregisterApplicationMaster(status, appMsg, trackingUrl);
    + asyncClient.stop();
    + }
    + 
    ]]> +
    +
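The deprecation note above points callers at requestContainerUpdate. A hedged sketch of an in-place resource increase, assuming the UpdateContainerRequest/ContainerUpdateType API referenced in the Javadoc (container and asyncClient are hypothetical variables carried over from the example above):

  {@code
  // Illustrative sketch only, not part of this patch.
  Resource target = Resource.newInstance(4096, 4);
  UpdateContainerRequest update = UpdateContainerRequest.newInstance(
      container.getVersion(), container.getId(),
      ContainerUpdateType.INCREASE_RESOURCE, target, null);
  // Replaces any pending update request for the same container.
  asyncClient.requestContainerUpdate(container, update);
  }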
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Update the resources of a container.

    + +

    The ApplicationMaster or other applications that use the + client must provide the details of the container, including the Id and + the target resource encapsulated in the updated container token via + {@link Container}. +

    + + @param container the container with updated token.]]> +
    +
    + + + + + + Re-Initialize the Container.

+ + @param containerId the Id of the container to Re-Initialize. + @param containerLaunchContex the updated ContainerLaunchContext. + @param autoCommit whether to commit the re-initialization automatically]]> +
    +
    + + + + Restart the specified container.

    + + @param containerId the Id of the container to restart.]]> +
    +
    + + + + Rollback last reInitialization of the specified container.

+ + @param containerId the Id of the container whose last reInitialization is to be rolled back.]]> +
    +
    + + + + Commit last reInitialization of the specified container.

+ + @param containerId the Id of the container whose reInitialization is to be committed.]]> +
    +
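The re-initialization methods above are easiest to read as one sequence. Below is a sketch against the synchronous NMClient, assuming its reInitializeContainer, rollbackLastReInitialization and commitLastReInitialization methods match the descriptions in this Javadoc (containerId, newLaunchContext and healthCheckFailed are hypothetical):

  {@code
  // Illustrative sketch only, not part of this patch.
  NMClient nmClient = NMClient.createNMClient();
  nmClient.init(conf);
  nmClient.start();

  // autoCommit=false keeps the previous state so the change can be rolled back.
  nmClient.reInitializeContainer(containerId, newLaunchContext, false);

  if (healthCheckFailed) {
    // Revert to the pre-re-initialization state.
    nmClient.rollbackLastReInitialization(containerId);
  } else {
    // Make the re-initialization permanent.
    nmClient.commitLastReInitialization(containerId);
  }
  }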
    + + + + + + + + + + + + + + + + + + + + + + + + NMClientAsync handles communication with all the NodeManagers + and provides asynchronous updates on getting responses from them. It + maintains a thread pool to communicate with individual NMs where a number of + worker threads process requests to NMs by using {@link NMClientImpl}. The max + size of the thread pool is configurable through + {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE}. + + It should be used in conjunction with a CallbackHandler. For example + +
    + {@code
    + class MyCallbackHandler extends NMClientAsync.AbstractCallbackHandler {
    +   public void onContainerStarted(ContainerId containerId,
+       Map<String, ByteBuffer> allServiceResponse) {
    +     [post process after the container is started, process the response]
    +   }
    +
    +   public void onContainerResourceIncreased(ContainerId containerId,
    +       Resource resource) {
    +     [post process after the container resource is increased]
    +   }
    +
    +   public void onContainerStatusReceived(ContainerId containerId,
    +       ContainerStatus containerStatus) {
    +     [make use of the status of the container]
    +   }
    +
    +   public void onContainerStopped(ContainerId containerId) {
    +     [post process after the container is stopped]
    +   }
    +
    +   public void onStartContainerError(
    +       ContainerId containerId, Throwable t) {
    +     [handle the raised exception]
    +   }
    +
    +   public void onGetContainerStatusError(
    +       ContainerId containerId, Throwable t) {
    +     [handle the raised exception]
    +   }
    +
    +   public void onStopContainerError(
    +       ContainerId containerId, Throwable t) {
    +     [handle the raised exception]
    +   }
    + }
    + }
    + 
    + + The client's life-cycle should be managed like the following: + +
    + {@code
    + NMClientAsync asyncClient = 
+     NMClientAsync.createNMClientAsync(new MyCallbackHandler());
    + asyncClient.init(conf);
    + asyncClient.start();
    + asyncClient.startContainer(container, containerLaunchContext);
+ [... wait for the container to be started]
    + asyncClient.getContainerStatus(container.getId(), container.getNodeId(),
    +     container.getContainerToken());
    + [... handle the status in the callback instance]
    + asyncClient.stopContainer(container.getId(), container.getNodeId(),
    +     container.getContainerToken());
+ [... wait for the container to be stopped]
    + asyncClient.stop();
    + }
    + 
    ]]> +
    +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.2.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.2.2.xml new file mode 100644 index 0000000000000..8590265accd88 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.2.2.xml @@ -0,0 +1,3957 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Type of proxy. + @return Proxy to the ResourceManager for the specified client protocol. + @throws IOException]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Create a new instance of AppAdminClient. +

    + + @param appType application type + @param conf configuration + @return app admin client]]> +
    +
    + + + + + + + + + + Launch a new YARN application. +

    + + @param fileName specification of application + @param appName name of the application + @param lifetime lifetime of the application + @param queue queue of the application + @return exit code + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + Stop a YARN application (attempt to stop gracefully before killing the + application). In the case of a long-running service, the service may be + restarted later. +

    + + @param appName the name of the application + @return exit code + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + Start a YARN application from a previously saved specification. In the + case of a long-running service, the service must have been previously + launched/started and then stopped, or previously saved but not started. +

    + + @param appName the name of the application + @return exit code + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + + + + Save the specification for a YARN application / long-running service. + The application may be started later. +

    + + @param fileName specification of application to save + @param appName name of the application + @param lifetime lifetime of the application + @param queue queue of the application + @return exit code + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + Remove the specification and all application data for a YARN application. + The application cannot be running. +

    + + @param appName the name of the application + @return exit code + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + + Change the number of running containers for a component of a YARN + application / long-running service. +

    + + @param appName the name of the application + @param componentCounts map of component name to new component count or + amount to change existing component count (e.g. + 5, +5, -5) + @return exit code + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + Upload AM dependencies to HDFS. This makes future application launches + faster since the dependencies do not have to be uploaded on each launch. +

    + + @param destinationFolder + an optional HDFS folder where dependency tarball will be uploaded + @return exit code + @throws IOException + IOException + @throws YarnException + exception in client or server]]> +
    +
    + + + + + + + Get detailed app specific status string for a YARN application. +

    + + @param appIdOrName appId or appName + @return status string + @throws IOException IOException + @throws YarnException exception in client or server]]> +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
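Taken together, the operations above form the lifecycle of a long-running service managed through AppAdminClient. A sketch of that lifecycle follows; the action* method names and the "yarn-service" client type are assumptions based on the descriptions above and are not spelled out in this patch:

  {@code
  // Illustrative sketch only; method names are assumptions.
  AppAdminClient client =
      AppAdminClient.createAppAdminClient("yarn-service", conf);

  // Launch from a specification file, then inspect it.
  client.actionLaunch("sleeper.json", "sleeper", null, null);
  System.out.println(client.getStatusString("sleeper"));

  // Scale the "worker" component to 5 containers.
  client.actionFlex("sleeper", Collections.singletonMap("worker", "5"));

  // Stop gracefully, then remove the saved specification and data.
  client.actionStop("sleeper");
  client.actionDestroy("sleeper");
  }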
    + + + + + + + + + + + + + + + + + + Send the information of a number of conceptual entities to the timeline + server. It is a blocking API. The method will not return until it gets the + response from the timeline server. +

    + + @param entities + the collection of {@link TimelineEntity} + @return the error information if the sent entities are not correctly stored + @throws IOException if there are I/O errors + @throws YarnException if entities are incomplete/invalid]]> +
    +
    + + + + + + + + + Send the information of a number of conceptual entities to the timeline + server. It is a blocking API. The method will not return until it gets the + response from the timeline server. + + This API is only for timeline service v1.5 +

    + + @param appAttemptId {@link ApplicationAttemptId} + @param groupId {@link TimelineEntityGroupId} + @param entities + the collection of {@link TimelineEntity} + @return the error information if the sent entities are not correctly stored + @throws IOException if there are I/O errors + @throws YarnException if entities are incomplete/invalid]]> +
    +
    + + + + + + + Send the information of a domain to the timeline server. It is a + blocking API. The method will not return until it gets the response from + the timeline server. +

    + + @param domain + an {@link TimelineDomain} object + @throws IOException + @throws YarnException]]> +
    +
    + + + + + + + + Send the information of a domain to the timeline server. It is a + blocking API. The method will not return until it gets the response from + the timeline server. + + This API is only for timeline service v1.5 +

    + + @param domain + an {@link TimelineDomain} object + @param appAttemptId {@link ApplicationAttemptId} + @throws IOException + @throws YarnException]]> +
    +
    + + + + + + + Get a delegation token so as to be able to talk to the timeline server in a + secure way. +

    + + @param renewer + Address of the renewer who can renew these tokens when needed by + securely talking to the timeline server + @return a delegation token ({@link Token}) that can be used to talk to the + timeline server + @throws IOException + @throws YarnException]]> +
    +
    + + + + + + + Renew a timeline delegation token. +

    + + @param timelineDT + the delegation token to renew + @return the new expiration time + @throws IOException + @throws YarnException]]> +
    +
    + + + + + + + Cancel a timeline delegation token. +

    + + @param timelineDT + the delegation token to cancel + @throws IOException + @throws YarnException]]> +
    +
    + + + +
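A short sketch of the blocking publish path described above, assuming the TimelineClient factory and the putDomain/putEntities calls this Javadoc documents; the domain and entity values are made up for illustration:

  {@code
  // Illustrative sketch only, not part of this patch.
  TimelineClient timelineClient = TimelineClient.createTimelineClient();
  timelineClient.init(conf);
  timelineClient.start();

  TimelineDomain domain = new TimelineDomain();
  domain.setId("my_domain");
  timelineClient.putDomain(domain);   // blocks until the server responds

  TimelineEntity entity = new TimelineEntity();
  entity.setEntityType("MY_APP");
  entity.setEntityId("entity_1");
  entity.setDomainId("my_domain");
  TimelinePutResponse response = timelineClient.putEntities(entity);

  timelineClient.stop();
  }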
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + parameterized event of type T]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + InputStream to be checksumed + @return the message digest of the input stream + @throws IOException]]> + + + + + + + + + + + + SharedCacheChecksum object based on the configurable + algorithm implementation + (see yarn.sharedcache.checksum.algo.impl) + + @return SharedCacheChecksum object]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The object type on which this state machine operates. + @param The state of the entity. + @param The external eventType to be handled. + @param The event object.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + When {@link #limit} would be reached on append, past messages will be + truncated from head, and a header telling the user about truncation will be + prepended, with ellipses in between header and messages. +

    + Note that header and ellipses are not counted against {@link #limit}. +

    + An example: + +

    + {@code
    +   // At the beginning it's an empty string
    +   final Appendable shortAppender = new BoundedAppender(80);
    +   // The whole message fits into limit
    +   shortAppender.append(
    +       "message1 this is a very long message but fitting into limit\n");
    +   // The first message is truncated, the second not
    +   shortAppender.append("message2 this is shorter than the previous one\n");
    +   // The first message is deleted, the second truncated, the third
    +   // preserved
    +   shortAppender.append("message3 this is even shorter message, maybe.\n");
    +   // The first two are deleted, the third one truncated, the last preserved
    +   shortAppender.append("message4 the shortest one, yet the greatest :)");
    +   // Current contents are like this:
    +   // Diagnostic messages truncated, showing last 80 chars out of 199:
    +   // ...s is even shorter message, maybe.
    +   // message4 the shortest one, yet the greatest :)
    + }
    + 
    +

    + Note that null values are {@link #append(CharSequence) append}ed + just like in {@link StringBuilder#append(CharSequence) original + implementation}. +

    + Note that this class is not thread safe.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.2.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.2.2.xml new file mode 100644 index 0000000000000..33ce392a76758 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.2.2.xml @@ -0,0 +1,1412 @@ + + + + + + + + + + + + + + + + + + + + + + + + true if the node is healthy, else false]]> + + + + + diagnostic health report of the node. + @return diagnostic health report of the node]]> + + + + + last timestamp at which the health report was received. + @return last timestamp at which the health report was received]]> + + + + + It includes information such as: +

      +
+ • An indicator of whether the node is healthy, as determined by the
+   health-check script.
+ • The previous time at which the health status was reported.
+ • A diagnostic report on the health status.
    + + @see NodeReport + @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]> +
    +
    + +
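As a hedged illustration of how the three health fields above are typically consumed on the server side (the getter names and the surrounding nodeStatus variable are assumptions, not taken from this patch):

  {@code
  // Illustrative sketch only; getter names are assumptions.
  NodeHealthStatus health = nodeStatus.getNodeHealthStatus();
  if (!health.getIsNodeHealthy()) {
    LOG.warn("Node unhealthy since {}: {}",
        health.getLastHealthReportTime(), health.getHealthReport());
  }
  }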
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + type of the proxy + @return the proxy instance + @throws IOException if fails to create the proxy]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true if the iteration has more elements.]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml index f748e36630768..8d4bbbd016c50 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-api - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN API @@ -35,8 +35,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava javax.xml.bind diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java index f5d8f02cb9a6b..533eaddabb4f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java @@ -224,6 +224,15 @@ enum Environment { @Private CLASSPATH_PREPEND_DISTCACHE("CLASSPATH_PREPEND_DISTCACHE"), + /** + * $LOCALIZATION_COUNTERS + * + * Since NM does not RPC Container JVM's we pass Localization counter + * vector as an environment variable + * + */ + LOCALIZATION_COUNTERS("LOCALIZATION_COUNTERS"), + /** * $CONTAINER_ID * Final, exported by NodeManager and non-modifiable by users. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index d2adfdc478a7e..1d4ffc9b5b9ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -424,9 +424,9 @@ public abstract void setKeepContainersAcrossApplicationAttempts( /** * Set tags for the application. A maximum of - * {@link YarnConfiguration#APPLICATION_MAX_TAGS} are allowed + * {@link YarnConfiguration#RM_APPLICATION_MAX_TAGS} are allowed * per application. Each tag can be at most - * {@link YarnConfiguration#APPLICATION_MAX_TAG_LENGTH} + * {@link YarnConfiguration#RM_APPLICATION_MAX_TAG_LENGTH} * characters, and can contain only ASCII characters. * * @param tags tags to set diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java index 02070100e3bb7..26a204ea392f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java @@ -78,4 +78,9 @@ public class ContainerExitStatus { */ public static final int KILLED_BY_CONTAINER_SCHEDULER = -108; + /** + * Container was terminated for generating excess log data. 
+ */ + public static final int KILLED_FOR_EXCESS_LOGS = -109; + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 83e1c5fe09b27..b91658f759c89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.impl.LightWeightResource; @@ -465,9 +466,13 @@ public int compareTo(Resource other) { @Override public String toString() { + return getFormattedString(String.valueOf(getMemorySize())); + } + + private String getFormattedString(String memory) { StringBuilder sb = new StringBuilder(); - sb.append("TimelineEntityGroupId is an abstract way for diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java index 655887a0c4a32..afa8e31427630 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java @@ -27,7 +27,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 67d1841e7d065..f560f733d813d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -93,10 +93,10 @@ public class YarnConfiguration extends Configuration { YARN_SITE_CONFIGURATION_FILE, CORE_SITE_CONFIGURATION_FILE)); - @Evolving + @Deprecated public static final int APPLICATION_MAX_TAGS = 10; - @Evolving + @Deprecated public static final int APPLICATION_MAX_TAG_LENGTH = 100; static { @@ -207,6 +207,18 @@ private static void addDeprecatedKeys() { public static final String DEFAULT_RM_ADDRESS = "0.0.0.0:" + DEFAULT_RM_PORT; + /**Max number of application tags.*/ + public static final String RM_APPLICATION_MAX_TAGS = RM_PREFIX + + "application.max-tags"; + + public static final int DEFAULT_RM_APPLICATION_MAX_TAGS = 10; + + /**Max length of each application tag.*/ + public static final String RM_APPLICATION_MAX_TAG_LENGTH = RM_PREFIX + + "application.max-tag.length"; + + public static final int DEFAULT_RM_APPLICATION_MAX_TAG_LENGTH = 100; + public 
static final String RM_APPLICATION_MASTER_SERVICE_PROCESSORS = RM_PREFIX + "application-master-service.processors"; @@ -502,12 +514,19 @@ public static boolean isAclEnabled(Configuration conf) { public static final int DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT = 1; /** - * The maximum number of application attempts. - * It's a global setting for all application masters. + * The maximum number of application attempts for + * an application, if unset by user. */ public static final String RM_AM_MAX_ATTEMPTS = RM_PREFIX + "am.max-attempts"; public static final int DEFAULT_RM_AM_MAX_ATTEMPTS = 2; + + /** + * The maximum number of application attempts. + * It's a global setting for all application masters. + */ + public static final String GLOBAL_RM_AM_MAX_ATTEMPTS = + RM_PREFIX + "am.global.max-attempts"; /** The keytab for the resource manager.*/ public static final String RM_KEYTAB = @@ -671,6 +690,30 @@ public static boolean isAclEnabled(Configuration conf) { RM_PREFIX + "nodemanagers.heartbeat-interval-ms"; public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS = 1000; + /** Enable Heartbeat Interval Scaling based on cpu utilization. */ + public static final String RM_NM_HEARTBEAT_INTERVAL_SCALING_ENABLE = + RM_PREFIX + "nodemanagers.heartbeat-interval-scaling-enable"; + public static final boolean + DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SCALING_ENABLE = false; + + public static final String RM_NM_HEARTBEAT_INTERVAL_MIN_MS = + RM_PREFIX + "nodemanagers.heartbeat-interval-min-ms"; + public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MIN_MS = 1000; + + public static final String RM_NM_HEARTBEAT_INTERVAL_MAX_MS = + RM_PREFIX + "nodemanagers.heartbeat-interval-max-ms"; + public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MAX_MS = 1000; + + public static final String RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR = + RM_PREFIX + "nodemanagers.heartbeat-interval-speedup-factor"; + public static final float + DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR = 1.0f; + + public static final String RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR = + RM_PREFIX + "nodemanagers.heartbeat-interval-slowdown-factor"; + public static final float + DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR = 1.0f; + /** Number of worker threads that write the history data. */ public static final String RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = RM_PREFIX + "history-writer.multi-threaded-dispatcher.pool-size"; @@ -729,6 +772,9 @@ public static boolean isAclEnabled(Configuration conf) { RM_PREFIX + "delegation-token.max-conf-size-bytes"; public static final int DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES = 12800; + public static final String RM_DELEGATION_TOKEN_ALWAYS_CANCEL = + RM_PREFIX + "delegation-token.always-cancel"; + public static final boolean DEFAULT_RM_DELEGATION_TOKEN_ALWAYS_CANCEL = false; public static final String RM_DT_RENEWER_THREAD_TIMEOUT = RM_PREFIX + "delegation-token-renewer.thread-timeout"; @@ -1192,6 +1238,15 @@ public static boolean isAclEnabled(Configuration conf) { public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env"; public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX"; + /** + * PATH components that will be prepended to the user's path. + * If this is defined and the user does not define PATH, NM will also + * append ":$PATH" to prevent this from eclipsing the PATH defined in + * the container. This feature is only available for Linux. 
+ * */ + public static final String NM_ADMIN_FORCE_PATH = NM_PREFIX + "force.path"; + public static final String DEFAULT_NM_ADMIN_FORCE_PATH = ""; + /** Environment variables that containers may override rather than use NodeManager's default.*/ public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist"; public static final String DEFAULT_NM_ENV_WHITELIST = StringUtils.join(",", @@ -1891,6 +1946,25 @@ public static boolean isAclEnabled(Configuration conf) { public static final String APPLICATION_TAG_BASED_PLACEMENT_USER_WHITELIST = APPLICATION_TAG_BASED_PLACEMENT_PREFIX + ".username.whitelist"; + /** Enable switch for container log monitoring. */ + public static final String NM_CONTAINER_LOG_MONITOR_ENABLED = + NM_PREFIX + "container-log-monitor.enable"; + public static final boolean DEFAULT_NM_CONTAINER_LOG_MONITOR_ENABLED = false; + /** How often to monitor logs generated by containers. */ + public static final String NM_CONTAINER_LOG_MON_INTERVAL_MS = + NM_PREFIX + "container-log-monitor.interval-ms"; + public static final int DEFAULT_NM_CONTAINER_LOG_MON_INTERVAL_MS = 60000; + /** The disk space limit for a single container log directory. */ + public static final String NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES = + NM_PREFIX + "container-log-monitor.dir-size-limit-bytes"; + public static final long DEFAULT_NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES = + 1000000000L; + /** The disk space limit for all of a container's logs. */ + public static final String NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES = + NM_PREFIX + "container-log-monitor.total-size-limit-bytes"; + public static final long DEFAULT_NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES = + 10000000000L; + /** Enable/disable container metrics. */ @Private public static final String NM_CONTAINER_METRICS_ENABLE = @@ -1946,6 +2020,8 @@ public static boolean isAclEnabled(Configuration conf) { * marked as offline. Values can range from 0.0 to 100.0. If the value is * greater than or equal to 100, NM will check for full disk. This applies to * nm-local-dirs and nm-log-dirs. + * + * This applies when disk-utilization-threshold.enabled is true. */ public static final String NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE = NM_DISK_HEALTH_CHECK_PREFIX + "max-disk-utilization-per-disk-percentage"; @@ -1955,6 +2031,17 @@ public static boolean isAclEnabled(Configuration conf) { public static final float DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE = 90.0F; + /** + * Enable/Disable the disk utilisation percentage + * threshold for disk health checker. + */ + public static final String NM_DISK_UTILIZATION_THRESHOLD_ENABLED = + NM_DISK_HEALTH_CHECK_PREFIX + + "disk-utilization-threshold.enabled"; + + public static final + boolean DEFAULT_NM_DISK_UTILIZATION_THRESHOLD_ENABLED = true; + /** * The low threshold percentage of disk space used when an offline disk is * marked as online. Values can range from 0.0 to 100.0. The value shouldn't @@ -1970,9 +2057,23 @@ public static boolean isAclEnabled(Configuration conf) { /** * The minimum space that must be available on a local dir for it to be used. * This applies to nm-local-dirs and nm-log-dirs. + * + * This applies when disk-free-space-threshold.enabled is true. */ public static final String NM_MIN_PER_DISK_FREE_SPACE_MB = NM_DISK_HEALTH_CHECK_PREFIX + "min-free-space-per-disk-mb"; + + /** + * Enable/Disable the minimum disk free + * space threshold for disk health checker. 
+ */ + public static final String NM_DISK_FREE_SPACE_THRESHOLD_ENABLED = + NM_DISK_HEALTH_CHECK_PREFIX + + "disk-free-space-threshold.enabled"; + + public static final boolean + DEFAULT_NM_DISK_FREE_SPACE_THRESHOLD_ENABLED = true; + /** * The minimum space that must be available on an offline * disk for it to be marked as online. The value should not be less @@ -1999,6 +2100,13 @@ public static boolean isAclEnabled(Configuration conf) { NM_PREFIX + "health-checker.interval-ms"; public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 1000; + /** Whether or not to run the node health script before the NM + * starts up.*/ + public static final String NM_HEALTH_CHECK_RUN_BEFORE_STARTUP = + NM_PREFIX + "health-checker.run-before-startup"; + public static final boolean DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP = + false; + /** Health check time out period for all scripts.*/ public static final String NM_HEALTH_CHECK_TIMEOUT_MS = NM_PREFIX + "health-checker.timeout-ms"; @@ -2369,6 +2477,18 @@ public static boolean isAclEnabled(Configuration conf) { public static final boolean DEFAULT_NM_DOCKER_ALLOW_HOST_PID_NAMESPACE = false; + public static final String YARN_HTTP_WEBAPP_EXTERNAL_CLASSES = + "yarn.http.rmwebapp.external.classes"; + + public static final String YARN_HTTP_WEBAPP_SCHEDULER_PAGE = + "yarn.http.rmwebapp.scheduler.page.class"; + + public static final String YARN_HTTP_WEBAPP_CUSTOM_DAO_CLASSES = + "yarn.http.rmwebapp.custom.dao.classes"; + + public static final String YARN_HTTP_WEBAPP_CUSTOM_UNWRAPPED_DAO_CLASSES = + "yarn.http.rmwebapp.custom.unwrapped.dao.classes"; + /** * Whether or not users are allowed to request that Docker containers honor * the debug deletion delay. This is useful for troubleshooting Docker diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java index 0a28887b90e97..85352b3f5d10c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.util.constraint; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java index e1177053fdf72..428fedbc1f502 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.util.csi; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import 
org.apache.hadoop.yarn.exceptions.YarnException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java index a86db15116c1c..f7e7f35a89e27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.util.resource; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java index dda30c76eff45..708ac928b39a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.resource; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.util.Iterator; import java.util.Map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java index 79ff70a7afa32..5047d8eb86860 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.conf; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.junit.jupiter.api.Test; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml index 7b021f7d8b66b..2b79d9ffa1224 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-docker/pom.xml @@ -23,7 +23,7 @@ hadoop-yarn-applications-catalog org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Application 
Catalog Docker Image diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml index 0924f6e59ddb3..9179acba5a1ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/pom.xml @@ -23,7 +23,7 @@ hadoop-yarn-applications-catalog org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Application Catalog Webapp diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/partials/details.html b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/partials/details.html index 8624440ea43df..64df3b7caeb50 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/partials/details.html +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/webapp/partials/details.html @@ -18,7 +18,7 @@ Stop Upgrade {{details.yarnfile.state}} @@ -53,7 +53,7 @@

    {{docker.name}}

    - + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml index f62a8f468f853..0054a669921df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/pom.xml @@ -19,7 +19,7 @@ hadoop-yarn-applications org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index ee41ffb1e8a20..15ddf2af17479 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn-applications org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0hadoop-yarn-applications-distributedshell - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOTApache Hadoop YARN DistributedShell @@ -50,8 +50,8 @@ log4j - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava commons-cli diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index bb300db26d25a..ae14d0931abd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -48,7 +48,7 @@ import java.util.Arrays; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; @@ -128,7 +128,7 @@ import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.apache.log4j.LogManager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.ClientHandlerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 8ba391e78f6e7..5da4384b00b8b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -32,8 +32,9 @@ import java.util.Vector; import java.util.Arrays; import java.util.Base64; +import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; @@ -97,7 +98,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -143,6 +144,9 @@ public class Client { private static final int DEFAULT_AM_VCORES = 1; private static final int DEFAULT_CONTAINER_MEMORY = 10; private static final int DEFAULT_CONTAINER_VCORES = 1; + + // check the application once per second. + private static final int APP_MONITOR_INTERVAL = 1000; // Configuration private Configuration conf; @@ -209,7 +213,7 @@ public class Client { private String rollingFilesPattern = ""; // Start time for client - private final long clientStartTime = System.currentTimeMillis(); + private long clientStartTime = System.currentTimeMillis(); // Timeout threshold for client. Kill app after time interval expires. private long clientTimeout = 600000; @@ -250,6 +254,10 @@ public class Client { // Command line options private Options opts; + private final AtomicBoolean stopSignalReceived; + private final AtomicBoolean isRunning; + private final Object objectLock = new Object(); + private static final String shellCommandPath = "shellCommands"; private static final String shellArgsPath = "shellArgs"; private static final String appMasterJarPath = "AppMaster.jar"; @@ -410,6 +418,8 @@ public Client(Configuration conf) throws Exception { opts.addOption("application_tags", true, "Application tags."); opts.addOption("localize_files", true, "List of files, separated by comma" + " to be localized for the command"); + stopSignalReceived = new AtomicBoolean(false); + isRunning = new AtomicBoolean(false); } /** @@ -667,9 +677,11 @@ public boolean init(String[] args) throws ParseException { * @throws YarnException */ public boolean run() throws IOException, YarnException { - LOG.info("Running Client"); + isRunning.set(true); yarnClient.start(); + // set the client start time. + clientStartTime = System.currentTimeMillis(); YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics(); LOG.info("Got Cluster metric info from ASM" @@ -983,7 +995,6 @@ public boolean run() throws IOException, YarnException { if (keepContainers) { vargs.add("--keep_containers_across_application_attempts"); } - for (Map.Entry entry : shellEnv.entrySet()) { vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue()); } @@ -1110,13 +1121,24 @@ void specifyLogAggregationContext(ApplicationSubmissionContext appContext) { private boolean monitorApplication(ApplicationId appId) throws YarnException, IOException { - while (true) { - + boolean res = false; + boolean needForceKill = false; + while (isRunning.get()) { // Check app status every 1 second. 
try { - Thread.sleep(1000); + synchronized (objectLock) { + objectLock.wait(APP_MONITOR_INTERVAL); + } + needForceKill = stopSignalReceived.get(); } catch (InterruptedException e) { - LOG.debug("Thread sleep in monitoring loop interrupted"); + LOG.warn("Thread sleep in monitoring loop interrupted"); + // if the application is to be killed when client times out; + // then set needForceKill to true + break; + } finally { + if (needForceKill) { + break; + } } // Get application report for the appId we are interested in @@ -1139,22 +1161,20 @@ private boolean monitorApplication(ApplicationId appId) FinalApplicationStatus dsStatus = report.getFinalApplicationStatus(); if (YarnApplicationState.FINISHED == state) { if (FinalApplicationStatus.SUCCEEDED == dsStatus) { - LOG.info("Application has completed successfully. Breaking monitoring loop"); - return true; - } - else { - LOG.info("Application did finished unsuccessfully." - + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() - + ". Breaking monitoring loop"); - return false; + LOG.info("Application has completed successfully. " + + "Breaking monitoring loop"); + res = true; + } else { + LOG.info("Application did finished unsuccessfully. " + + "YarnState={}, DSFinalStatus={}. Breaking monitoring loop", + state, dsStatus); } - } - else if (YarnApplicationState.KILLED == state + break; + } else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) { - LOG.info("Application did not finish." - + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() - + ". Breaking monitoring loop"); - return false; + LOG.info("Application did not finish. YarnState={}, DSFinalStatus={}. " + + "Breaking monitoring loop", state, dsStatus); + break; } // The value equal or less than 0 means no timeout @@ -1162,11 +1182,18 @@ else if (YarnApplicationState.KILLED == state && System.currentTimeMillis() > (clientStartTime + clientTimeout)) { LOG.info("Reached client specified timeout for application. 
" + "Killing application"); - forceKillApplication(appId); - return false; + needForceKill = true; + break; } } + if (needForceKill) { + forceKillApplication(appId); + } + + isRunning.set(false); + + return res; } /** @@ -1377,4 +1404,31 @@ static Map parseResourcesString(String resourcesStr) { } return resources; } + + @VisibleForTesting + protected void sendStopSignal() { + LOG.info("Sending stop Signal to Client"); + stopSignalReceived.set(true); + synchronized (objectLock) { + objectLock.notifyAll(); + } + int waitCount = 0; + LOG.info("Waiting for Client to exit loop"); + while (!isRunning.get()) { + try { + Thread.sleep(50); + } catch (InterruptedException ie) { + // do nothing + } finally { + waitCount++; + if (isRunning.get() || waitCount > 2000) { + break; + } + } + } + LOG.info("Stopping yarnClient within the Client"); + yarnClient.stop(); + yarnClient.waitForServiceToStop(clientTimeout); + LOG.info("done stopping Client"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java index f3571a6b81aa6..0094219584687 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java @@ -40,7 +40,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 942d1c94c33c3..4222fd0b566d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -27,6 +27,7 @@ import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.File; +import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; @@ -38,9 +39,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; @@ 
-88,6 +90,7 @@ import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants; import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants; import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.timeline.NameValuePair; import org.apache.hadoop.yarn.server.timeline.PluginStoreTestUtils; import org.apache.hadoop.yarn.server.timeline.TimelineVersion; @@ -107,6 +110,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestName; import org.junit.rules.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -127,6 +131,9 @@ public class TestDistributedShell { private static final float DEFAULT_TIMELINE_VERSION = 1.0f; private static final String TIMELINE_AUX_SERVICE_NAME = "timeline_collector"; private static final int MIN_ALLOCATION_MB = 128; + private static final int TEST_TIME_OUT = 150000; + // set the timeout of the yarnClient to be 95% of the globalTimeout. + private static final int TEST_TIME_WINDOW_EXPIRE = (TEST_TIME_OUT * 90) / 100; protected final static String APPMASTER_JAR = JarFinder.getJar(ApplicationMaster.class); @@ -134,11 +141,30 @@ public class TestDistributedShell { @Rule public TimelineVersionWatcher timelineVersionWatcher = new TimelineVersionWatcher(); + @Rule - public Timeout globalTimeout = new Timeout(90000); + public Timeout globalTimeout = new Timeout(TEST_TIME_OUT, + TimeUnit.MILLISECONDS); + @Rule public TemporaryFolder tmpFolder = new TemporaryFolder(); + @Rule + public TestName name = new TestName(); + + // set the timeout of the yarnClient to be 95% of the globalTimeout. + private final String yarnClientTimeout = + String.valueOf(TEST_TIME_WINDOW_EXPIRE); + + private final String[] commonArgs = { + "--jar", + APPMASTER_JAR, + "--timeout", + yarnClientTimeout, + "--appname", + "" + }; + @Before public void setup() throws Exception { setupInternal(NUM_NMS, timelineVersionWatcher.getTimelineVersion()); @@ -157,6 +183,7 @@ private void setupInternal(int numNodeManager, float timelineVersion) MIN_ALLOCATION_MB); // reduce the teardown waiting time conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000); + conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 500); conf.set("yarn.log.dir", "target"); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); // mark if we need to launch the v1 timeline server @@ -182,6 +209,8 @@ private void setupInternal(int numNodeManager, float timelineVersion) true); conf.setBoolean( YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true); + conf.setInt(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, + 10); conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER, YarnConfiguration.PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER); // ATS version specific settings @@ -190,11 +219,10 @@ private void setupInternal(int numNodeManager, float timelineVersion) conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT); } else if (timelineVersion == 1.5f) { - if (hdfsCluster == null) { - HdfsConfiguration hdfsConfig = new HdfsConfiguration(); - hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) - .numDataNodes(1).build(); - } + HdfsConfiguration hdfsConfig = new HdfsConfiguration(); + hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) + .numDataNodes(1).build(); + hdfsCluster.waitActive(); fs = 
hdfsCluster.getFileSystem(); PluginStoreTestUtils.prepareFileSystemForPluginStore(fs); PluginStoreTestUtils.prepareConfiguration(conf, hdfsCluster); @@ -220,39 +248,39 @@ private void setupInternal(int numNodeManager, float timelineVersion) } else { Assert.fail("Wrong timeline version number: " + timelineVersion); } - - if (yarnCluster == null) { - yarnCluster = - new MiniYARNCluster(TestDistributedShell.class.getSimpleName(), 1, - numNodeManager, 1, 1); - yarnCluster.init(conf); - - yarnCluster.start(); - - conf.set( - YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, - MiniYARNCluster.getHostname() + ":" - + yarnCluster.getApplicationHistoryServer().getPort()); - - waitForNMsToRegister(); - - URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml"); - if (url == null) { - throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath"); - } - Configuration yarnClusterConfig = yarnCluster.getConfig(); - yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, - new File(url.getPath()).getParent()); - //write the document to a buffer (not directly to the file, as that - //can cause the file being written to get read -which will then fail. - ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); - yarnClusterConfig.writeXml(bytesOut); - bytesOut.close(); - //write the bytes to the file in the classpath - OutputStream os = new FileOutputStream(new File(url.getPath())); - os.write(bytesOut.toByteArray()); - os.close(); + + yarnCluster = + new MiniYARNCluster(TestDistributedShell.class.getSimpleName(), 1, + numNodeManager, 1, 1); + yarnCluster.init(conf); + yarnCluster.start(); + + conf.set( + YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, + MiniYARNCluster.getHostname() + ":" + + yarnCluster.getApplicationHistoryServer().getPort()); + + waitForNMsToRegister(); + + URL url = Thread.currentThread().getContextClassLoader().getResource( + "yarn-site.xml"); + if (url == null) { + throw new RuntimeException( + "Could not find 'yarn-site.xml' dummy file in classpath"); } + Configuration yarnClusterConfig = yarnCluster.getConfig(); + yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, + new File(url.getPath()).getParent()); + //write the document to a buffer (not directly to the file, as that + //can cause the file being written to get read -which will then fail. 
+ ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); + yarnClusterConfig.writeXml(bytesOut); + bytesOut.close(); + //write the bytes to the file in the classpath + OutputStream os = new FileOutputStream(url.getPath()); + os.write(bytesOut.toByteArray()); + os.close(); + FileContext fsContext = FileContext.getLocalFSFileContext(); fsContext .delete( @@ -267,6 +295,11 @@ private void setupInternal(int numNodeManager, float timelineVersion) @After public void tearDown() throws IOException { + FileContext fsContext = FileContext.getLocalFSFileContext(); + fsContext + .delete( + new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)), + true); if (yarnCluster != null) { try { yarnCluster.stop(); @@ -281,11 +314,6 @@ public void tearDown() throws IOException { hdfsCluster = null; } } - FileContext fsContext = FileContext.getLocalFSFileContext(); - fsContext - .delete( - new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)), - true); } @Test @@ -334,9 +362,7 @@ public void testDSShellWithoutDomainV2CustomizedFlow() throws Exception { public void testDSShell(boolean haveDomain, boolean defaultFlow) throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "2", "--shell_command", @@ -348,8 +374,8 @@ public void testDSShell(boolean haveDomain, boolean defaultFlow) "--container_memory", "128", "--container_vcores", - "1" - }; + "1"); + if (haveDomain) { String[] domainArgs = { "--domain", @@ -380,6 +406,7 @@ public void testDSShell(boolean haveDomain, boolean defaultFlow) } LOG.info("Initializing DS Client"); + YarnClient yarnClient; final Client client = new Client(new Configuration(yarnCluster.getConfig())); boolean initSuccess = client.init(args); Assert.assertTrue(initSuccess); @@ -396,7 +423,7 @@ public void run() { }; t.start(); - YarnClient yarnClient = YarnClient.createYarnClient(); + yarnClient = YarnClient.createYarnClient(); yarnClient.init(new Configuration(yarnCluster.getConfig())); yarnClient.start(); @@ -404,15 +431,15 @@ public void run() { String errorMessage = ""; ApplicationId appId = null; ApplicationReport appReport = null; - while(!verified) { + while (!verified) { List apps = yarnClient.getApplications(); - if (apps.size() == 0 ) { + if (apps.size() == 0) { Thread.sleep(10); continue; } appReport = apps.get(0); appId = appReport.getApplicationId(); - if(appReport.getHost().equals("N/A")) { + if (appReport.getHost().equals("N/A")) { Thread.sleep(10); continue; } @@ -425,7 +452,7 @@ public void run() { if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED && appReport.getFinalApplicationStatus() != - FinalApplicationStatus.UNDEFINED) { + FinalApplicationStatus.UNDEFINED) { break; } } @@ -452,11 +479,10 @@ public void run() { } } - TimelineDomain domain = null; if (!isTestingTimelineV2) { checkTimelineV1(haveDomain); } else { - checkTimelineV2(haveDomain, appId, defaultFlow, appReport); + checkTimelineV2(appId, defaultFlow, appReport); } } @@ -478,8 +504,8 @@ private void checkTimelineV1(boolean haveDomain) throws Exception { Assert.assertEquals(1, entitiesAttempts.getEntities().size()); Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents() .size()); - Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType() - .toString(), ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString()); + Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType(), + ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString()); if (haveDomain) { 
Assert.assertEquals(domain.getId(), entitiesAttempts.getEntities().get(0).getDomainId()); @@ -501,8 +527,8 @@ private void checkTimelineV1(boolean haveDomain) throws Exception { null, null, null, null, primaryFilter, null, null, null); Assert.assertNotNull(entities); Assert.assertEquals(2, entities.getEntities().size()); - Assert.assertEquals(entities.getEntities().get(0).getEntityType() - .toString(), ApplicationMaster.DSEntity.DS_CONTAINER.toString()); + Assert.assertEquals(entities.getEntities().get(0).getEntityType(), + ApplicationMaster.DSEntity.DS_CONTAINER.toString()); String entityId = entities.getEntities().get(0).getEntityId(); org.apache.hadoop.yarn.api.records.timeline.TimelineEntity entity = @@ -521,7 +547,7 @@ private void checkTimelineV1(boolean haveDomain) throws Exception { } } - private void checkTimelineV2(boolean haveDomain, ApplicationId appId, + private void checkTimelineV2(ApplicationId appId, boolean defaultFlow, ApplicationReport appReport) throws Exception { LOG.info("Started checkTimelineV2 "); // For PoC check using the file-based timeline writer (YARN-3264) @@ -624,7 +650,13 @@ private void checkTimelineV2(boolean haveDomain, ApplicationId appId, verifyEntityForTimelineV2(appAttemptEntityFile, AppAttemptMetricsConstants.FINISHED_EVENT_TYPE, 1, 1, 0, true); } finally { - FileUtils.deleteDirectory(tmpRootFolder.getParentFile()); + try { + FileUtils.deleteDirectory(tmpRootFolder.getParentFile()); + } catch (FileNotFoundException ex) { + // the recursive delete can throw an exception when one of the file + // does not exist. + LOG.warn("Exception deleting a file/subDirectory: {}", ex.getMessage()); + } } } @@ -662,7 +694,7 @@ private void verifyEntityForTimelineV2(File entityFile, String expectedEvent, long actualCount = 0; for (int i = 0; i < checkTimes; i++) { BufferedReader reader = null; - String strLine = null; + String strLine; actualCount = 0; try { reader = new BufferedReader(new FileReader(entityFile)); @@ -699,7 +731,9 @@ private void verifyEntityForTimelineV2(File entityFile, String expectedEvent, } } } finally { - reader.close(); + if (reader != null) { + reader.close(); + } } if (numOfExpectedEvent == actualCount) { break; @@ -716,14 +750,35 @@ private void verifyEntityForTimelineV2(File entityFile, String expectedEvent, * Utility function to merge two String arrays to form a new String array for * our argumemts. * - * @param args - * @param newArgs + * @param args the first set of the arguments. + * @param newArgs the second set of the arguments. * @return a String array consists of {args, newArgs} */ private String[] mergeArgs(String[] args, String[] newArgs) { - List argsList = new ArrayList(Arrays.asList(args)); - argsList.addAll(Arrays.asList(newArgs)); - return argsList.toArray(new String[argsList.size()]); + int length = args.length + newArgs.length; + String[] result = new String[length]; + System.arraycopy(args, 0, result, 0, args.length); + System.arraycopy(newArgs, 0, result, args.length, newArgs.length); + return result; + } + + private String generateAppName(String postFix) { + return name.getMethodName().replaceFirst("test", "") + .concat(postFix == null? "" : "-" + postFix); + } + + private String[] createArguments(String... args) { + String[] res = mergeArgs(commonArgs, args); + // set the application name so we can track down which command is running. + res[commonArgs.length - 1] = generateAppName(null); + return res; + } + + private String[] createArgsWithPostFix(int index, String... 
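The helpers above replace the per-test argument arrays with a shared commonArgs template whose last slot is an empty "--appname" value: mergeArgs appends the per-test options with System.arraycopy, and the slot is then filled with a name derived from the JUnit TestName rule so each YARN application can be traced back to its test method. A compact, self-contained sketch of those helpers; the class and the concrete commonArgs values are illustrative.

import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

public class ArgumentHelperSketch {

  @Rule
  public TestName name = new TestName();

  // Last element is the placeholder that createArguments() fills in.
  private final String[] commonArgs = {"--timeout", "135000", "--appname", ""};

  // Concatenate two argument arrays without a java.util.List round-trip.
  private String[] mergeArgs(String[] args, String[] newArgs) {
    String[] result = new String[args.length + newArgs.length];
    System.arraycopy(args, 0, result, 0, args.length);
    System.arraycopy(newArgs, 0, result, args.length, newArgs.length);
    return result;
  }

  // Derive the application name from the running test method.
  private String generateAppName(String postFix) {
    return name.getMethodName().replaceFirst("test", "")
        .concat(postFix == null ? "" : "-" + postFix);
  }

  private String[] createArguments(String... args) {
    String[] res = mergeArgs(commonArgs, args);
    res[commonArgs.length - 1] = generateAppName(null);
    return res;
  }

  @Test
  public void testNamePropagatesIntoArguments() {
    String[] args = createArguments("--num_containers", "1");
    Assert.assertEquals("NamePropagatesIntoArguments", args[3]);
    Assert.assertEquals("--num_containers", args[4]);
  }
}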
args) { + String[] res = mergeArgs(commonArgs, args); + // set the application name so we can track down which command is running. + res[commonArgs.length - 1] = generateAppName(String.valueOf(index)); + return res; } protected String getSleepCommand(int sec) { @@ -734,9 +789,7 @@ protected String getSleepCommand(int sec) { @Test public void testDSRestartWithPreviousRunningContainers() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", @@ -746,20 +799,20 @@ public void testDSRestartWithPreviousRunningContainers() throws Exception { "--container_memory", "128", "--keep_containers_across_application_attempts" - }; + ); - LOG.info("Initializing DS Client"); - Client client = new Client(TestDSFailedAppMaster.class.getName(), + LOG.info("Initializing DS Client"); + Client client = new Client(TestDSFailedAppMaster.class.getName(), new Configuration(yarnCluster.getConfig())); - client.init(args); - LOG.info("Running DS Client"); - boolean result = client.run(); + client.init(args); - LOG.info("Client run completed. Result=" + result); - // application should succeed - Assert.assertTrue(result); - } + LOG.info("Running DS Client"); + boolean result = client.run(); + LOG.info("Client run completed. Result=" + result); + // application should succeed + Assert.assertTrue(result); + } /* * The sleeping period in TestDSSleepingAppMaster is set as 5 seconds. @@ -769,9 +822,7 @@ public void testDSRestartWithPreviousRunningContainers() throws Exception { */ @Test public void testDSAttemptFailuresValidityIntervalSucess() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", @@ -782,22 +833,23 @@ public void testDSAttemptFailuresValidityIntervalSucess() throws Exception { "128", "--attempt_failures_validity_interval", "2500" - }; + ); - LOG.info("Initializing DS Client"); - Configuration conf = yarnCluster.getConfig(); - conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2); - Client client = new Client(TestDSSleepingAppMaster.class.getName(), - new Configuration(conf)); + LOG.info("Initializing DS Client"); + Configuration config = yarnCluster.getConfig(); + config.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2); + Client client = new Client(TestDSSleepingAppMaster.class.getName(), + new Configuration(config)); - client.init(args); - LOG.info("Running DS Client"); - boolean result = client.run(); + client.init(args); - LOG.info("Client run completed. Result=" + result); - // application should succeed - Assert.assertTrue(result); - } + LOG.info("Running DS Client"); + boolean result = client.run(); + + LOG.info("Client run completed. Result=" + result); + // application should succeed + Assert.assertTrue(result); + } /* * The sleeping period in TestDSSleepingAppMaster is set as 5 seconds. 
@@ -807,9 +859,7 @@ public void testDSAttemptFailuresValidityIntervalSucess() throws Exception { */ @Test public void testDSAttemptFailuresValidityIntervalFailed() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", @@ -820,22 +870,23 @@ public void testDSAttemptFailuresValidityIntervalFailed() throws Exception { "128", "--attempt_failures_validity_interval", "15000" - }; + ); - LOG.info("Initializing DS Client"); - Configuration conf = yarnCluster.getConfig(); - conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2); - Client client = new Client(TestDSSleepingAppMaster.class.getName(), - new Configuration(conf)); + LOG.info("Initializing DS Client"); + Configuration config = yarnCluster.getConfig(); + config.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2); + Client client = new Client(TestDSSleepingAppMaster.class.getName(), + new Configuration(config)); - client.init(args); - LOG.info("Running DS Client"); - boolean result = client.run(); + client.init(args); - LOG.info("Client run completed. Result=" + result); - // application should be failed - Assert.assertFalse(result); - } + LOG.info("Running DS Client"); + boolean result = client.run(); + + LOG.info("Client run completed. Result=" + result); + // application should be failed + Assert.assertFalse(result); + } @Test public void testDSShellWithCustomLogPropertyFile() throws Exception { @@ -854,9 +905,7 @@ public void testDSShellWithCustomLogPropertyFile() throws Exception { // set the output to DEBUG level fileWriter.write("log4j.rootLogger=debug,stdout"); fileWriter.close(); - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "3", "--shell_command", @@ -873,7 +922,7 @@ public void testDSShellWithCustomLogPropertyFile() throws Exception { "128", "--container_vcores", "1" - }; + ); //Before run the DS, the default the log level is INFO final Logger LOG_Client = @@ -889,6 +938,7 @@ public void testDSShellWithCustomLogPropertyFile() throws Exception { new Client(new Configuration(yarnCluster.getConfig())); boolean initSuccess = client.init(args); Assert.assertTrue(initSuccess); + LOG.info("Running DS Client"); boolean result = client.run(); LOG.info("Client run completed. Result=" + result); @@ -903,14 +953,12 @@ public void testDSShellWithCustomLogPropertyFile() throws Exception { @Test public void testSpecifyingLogAggregationContext() throws Exception { String regex = ".*(foo|bar)\\d"; - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--shell_command", "echo", "--rolling_log_pattern", regex - }; + ); final Client client = new Client(new Configuration(yarnCluster.getConfig())); Assert.assertTrue(client.init(args)); @@ -925,9 +973,7 @@ public void testSpecifyingLogAggregationContext() throws Exception { public void testDSShellWithCommands() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "2", "--shell_command", @@ -940,7 +986,7 @@ public void testDSShellWithCommands() throws Exception { "128", "--container_vcores", "1" - }; + ); LOG.info("Initializing DS Client"); final Client client = @@ -948,18 +994,20 @@ public void testDSShellWithCommands() throws Exception { boolean initSuccess = client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); - boolean result = client.run(); - LOG.info("Client run completed. 
Result=" + result); - List expectedContent = new ArrayList(); - expectedContent.add("output_expected"); - verifyContainerLog(2, expectedContent, false, ""); + try { + boolean result = client.run(); + LOG.info("Client run completed. Result=" + result); + List expectedContent = new ArrayList<>(); + expectedContent.add("output_expected"); + verifyContainerLog(2, expectedContent, false, ""); + } finally { + client.sendStopSignal(); + } } @Test public void testDSShellWithMultipleArgs() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "4", "--shell_command", @@ -974,7 +1022,7 @@ public void testDSShellWithMultipleArgs() throws Exception { "128", "--container_vcores", "1" - }; + ); LOG.info("Initializing DS Client"); final Client client = @@ -982,9 +1030,10 @@ public void testDSShellWithMultipleArgs() throws Exception { boolean initSuccess = client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); + boolean result = client.run(); LOG.info("Client run completed. Result=" + result); - List expectedContent = new ArrayList(); + List expectedContent = new ArrayList<>(); expectedContent.add("HADOOP YARN MAPREDUCE HDFS"); verifyContainerLog(4, expectedContent, false, ""); } @@ -1006,10 +1055,8 @@ public void testDSShellWithShellScript() throws Exception { // set the output to DEBUG level fileWriter.write("echo testDSShellWithShellScript"); fileWriter.close(); - System.out.println(customShellScript.getAbsolutePath()); - String[] args = { - "--jar", - APPMASTER_JAR, + LOG.info(customShellScript.getAbsolutePath()); + String[] args = createArguments( "--num_containers", "1", "--shell_script", @@ -1022,7 +1069,7 @@ public void testDSShellWithShellScript() throws Exception { "128", "--container_vcores", "1" - }; + ); LOG.info("Initializing DS Client"); final Client client = @@ -1032,7 +1079,7 @@ public void testDSShellWithShellScript() throws Exception { LOG.info("Running DS Client"); boolean result = client.run(); LOG.info("Client run completed. 
Result=" + result); - List expectedContent = new ArrayList(); + List expectedContent = new ArrayList<>(); expectedContent.add("testDSShellWithShellScript"); verifyContainerLog(1, expectedContent, false, ""); } @@ -1040,7 +1087,7 @@ public void testDSShellWithShellScript() throws Exception { @Test public void testDSShellWithInvalidArgs() throws Exception { Client client = new Client(new Configuration(yarnCluster.getConfig())); - + int appNameCounter = 0; LOG.info("Initializing DS Client with no args"); try { client.init(new String[]{}); @@ -1052,7 +1099,7 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client with no jar file"); try { - String[] args = { + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "2", "--shell_command", @@ -1061,8 +1108,9 @@ public void testDSShellWithInvalidArgs() throws Exception { "512", "--container_memory", "128" - }; - client.init(args); + ); + String[] argsNoJar = Arrays.copyOfRange(args, 2, args.length); + client.init(argsNoJar); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected", @@ -1071,16 +1119,14 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client with no shell command"); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "2", "--master_memory", "512", "--container_memory", "128" - }; + ); client.init(args); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { @@ -1090,9 +1136,7 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client with invalid no. of containers"); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "-1", "--shell_command", @@ -1101,7 +1145,7 @@ public void testDSShellWithInvalidArgs() throws Exception { "512", "--container_memory", "128" - }; + ); client.init(args); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { @@ -1111,9 +1155,7 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client with invalid no. 
of vcores"); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "2", "--shell_command", @@ -1126,7 +1168,7 @@ public void testDSShellWithInvalidArgs() throws Exception { "128", "--container_vcores", "1" - }; + ); client.init(args); client.run(); Assert.fail("Exception is expected"); @@ -1137,9 +1179,7 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client with --shell_command and --shell_script"); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "2", "--shell_command", @@ -1154,7 +1194,7 @@ public void testDSShellWithInvalidArgs() throws Exception { "1", "--shell_script", "test.sh" - }; + ); client.init(args); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { @@ -1165,9 +1205,7 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client without --shell_command and --shell_script"); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "2", "--master_memory", @@ -1178,7 +1216,7 @@ public void testDSShellWithInvalidArgs() throws Exception { "128", "--container_vcores", "1" - }; + ); client.init(args); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { @@ -1189,9 +1227,7 @@ public void testDSShellWithInvalidArgs() throws Exception { LOG.info("Initializing DS Client with invalid container_type argument"); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArgsWithPostFix(appNameCounter++, "--num_containers", "2", "--master_memory", @@ -1206,13 +1242,46 @@ public void testDSShellWithInvalidArgs() throws Exception { "date", "--container_type", "UNSUPPORTED_TYPE" - }; + ); client.init(args); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected", e.getMessage().contains("Invalid container_type: UNSUPPORTED_TYPE")); } + + try { + String[] args = createArgsWithPostFix(appNameCounter++, + "--num_containers", + "1", + "--shell_command", + Shell.WINDOWS ? "dir" : "ls", + "--master_resources", + "memory-mb=invalid" + ); + client.init(args); + Assert.fail("Exception is expected"); + } catch (IllegalArgumentException e) { + // do nothing + LOG.info("IllegalArgumentException exception is expected: {}", + e.getMessage()); + } + + try { + String[] args = createArgsWithPostFix(appNameCounter++, + "--num_containers", + "1", + "--shell_command", + Shell.WINDOWS ? 
"dir" : "ls", + "--master_resources" + ); + client.init(args); + Assert.fail("Exception is expected"); + } catch (MissingArgumentException e) { + // do nothing + LOG.info("MissingArgumentException exception is expected: {}", + e.getMessage()); + } } @Test @@ -1247,50 +1316,45 @@ protected TimelineWriter createTimelineWriter(Configuration conf, } protected void waitForNMsToRegister() throws Exception { - int sec = 60; - while (sec >= 0) { - if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size() - >= NUM_NMS) { - break; + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + RMContext rmContext = yarnCluster.getResourceManager().getRMContext(); + return (rmContext.getRMNodes().size() >= NUM_NMS); } - Thread.sleep(1000); - sec--; - } + }, 100, 60000); } @Test public void testContainerLaunchFailureHandling() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, - "--num_containers", - "2", - "--shell_command", - Shell.WINDOWS ? "dir" : "ls", - "--master_memory", - "512", - "--container_memory", - "128" - }; + String[] args = createArguments( + "--num_containers", + "2", + "--shell_command", + Shell.WINDOWS ? "dir" : "ls", + "--master_memory", + "512", + "--container_memory", + "128" + ); LOG.info("Initializing DS Client"); Client client = new Client(ContainerLaunchFailAppMaster.class.getName(), - new Configuration(yarnCluster.getConfig())); + new Configuration(yarnCluster.getConfig())); boolean initSuccess = client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); - boolean result = client.run(); - - LOG.info("Client run completed. Result=" + result); - Assert.assertFalse(result); - + try { + boolean result = client.run(); + Assert.assertFalse(result); + } finally { + client.sendStopSignal(); + } } @Test public void testDebugFlag() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "2", "--shell_command", @@ -1304,7 +1368,7 @@ public void testDebugFlag() throws Exception { "--container_vcores", "1", "--debug" - }; + ); LOG.info("Initializing DS Client"); Client client = new Client(new Configuration(yarnCluster.getConfig())); @@ -1337,7 +1401,7 @@ private int verifyContainerLog(int containerNum, for (File output : containerFiles[i].listFiles()) { if (output.getName().trim().contains("stdout")) { BufferedReader br = null; - List stdOutContent = new ArrayList(); + List stdOutContent = new ArrayList<>(); try { String sCurrentLine; @@ -1369,13 +1433,13 @@ private int verifyContainerLog(int containerNum, Assert.assertTrue(stdOutContent.containsAll(expectedContent)); } } catch (IOException e) { - e.printStackTrace(); + LOG.error("Exception reading the buffer", e); } finally { try { if (br != null) br.close(); } catch (IOException ex) { - ex.printStackTrace(); + LOG.error("Exception closing the bufferReader", ex); } } } @@ -1386,17 +1450,21 @@ private int verifyContainerLog(int containerNum, @Test public void testDistributedShellResourceProfiles() throws Exception { + int appNameCounter = 0; String[][] args = { - {"--jar", APPMASTER_JAR, "--num_containers", "1", "--shell_command", + createArgsWithPostFix(appNameCounter++, + "--num_containers", "1", "--shell_command", Shell.WINDOWS ? "dir" : "ls", "--container_resource_profile", - "maximum" }, - {"--jar", APPMASTER_JAR, "--num_containers", "1", "--shell_command", + "maximum"), + createArgsWithPostFix(appNameCounter++, + "--num_containers", "1", "--shell_command", Shell.WINDOWS ? 
"dir" : "ls", "--master_resource_profile", - "default" }, - {"--jar", APPMASTER_JAR, "--num_containers", "1", "--shell_command", + "default"), + createArgsWithPostFix(appNameCounter++, + "--num_containers", "1", "--shell_command", Shell.WINDOWS ? "dir" : "ls", "--master_resource_profile", - "default", "--container_resource_profile", "maximum" } - }; + "default", "--container_resource_profile", "maximum"), + }; for (int i = 0; i < args.length; ++i) { LOG.info("Initializing DS Client"); @@ -1416,9 +1484,7 @@ public void testDistributedShellResourceProfiles() throws Exception { public void testDSShellWithOpportunisticContainers() throws Exception { Client client = new Client(new Configuration(yarnCluster.getConfig())); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "2", "--master_memory", @@ -1433,22 +1499,24 @@ public void testDSShellWithOpportunisticContainers() throws Exception { "date", "--container_type", "OPPORTUNISTIC" - }; + ); client.init(args); - client.run(); + assertTrue(client.run()); } catch (Exception e) { - Assert.fail("Job execution with opportunistic containers failed."); + LOG.error("Job execution with opportunistic containers failed.", e); + Assert.fail("Exception. " + e.getMessage()); + } finally { + client.sendStopSignal(); } } @Test @TimelineVersion(2.0f) public void testDSShellWithEnforceExecutionType() throws Exception { + YarnClient yarnClient = null; Client client = new Client(new Configuration(yarnCluster.getConfig())); try { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "2", "--master_memory", @@ -1464,7 +1532,7 @@ public void testDSShellWithEnforceExecutionType() throws Exception { "--container_type", "OPPORTUNISTIC", "--enforce_execution_type" - }; + ); client.init(args); final AtomicBoolean result = new AtomicBoolean(false); Thread t = new Thread() { @@ -1478,7 +1546,7 @@ public void run() { }; t.start(); - YarnClient yarnClient = YarnClient.createYarnClient(); + yarnClient = YarnClient.createYarnClient(); yarnClient.init(new Configuration(yarnCluster.getConfig())); yarnClient.start(); waitForContainersLaunch(yarnClient, 2); @@ -1502,7 +1570,13 @@ public void run() { } } } catch (Exception e) { - Assert.fail("Job execution with enforce execution type failed."); + LOG.error("Job execution with enforce execution type failed.", e); + Assert.fail("Exception. 
" + e.getMessage()); + } finally { + client.sendStopSignal(); + if (yarnClient != null) { + yarnClient.stop(); + } } } @@ -1551,24 +1625,22 @@ public void doTestDistributedShellWithResources(boolean largeContainers) .getResourceScheduler().getClusterResource(); String masterMemoryString = "1 Gi"; String containerMemoryString = "512 Mi"; - long masterMemory = 1024; - long containerMemory = 512; + long[] memVars = {1024, 512}; + Assume.assumeTrue("The cluster doesn't have enough memory for this test", - clusterResource.getMemorySize() >= masterMemory + containerMemory); + clusterResource.getMemorySize() >= memVars[0] + memVars[1]); Assume.assumeTrue("The cluster doesn't have enough cores for this test", clusterResource.getVirtualCores() >= 2); if (largeContainers) { - masterMemory = clusterResource.getMemorySize() * 2 / 3; - masterMemory = masterMemory - masterMemory % MIN_ALLOCATION_MB; - masterMemoryString = masterMemory + "Mi"; - containerMemory = clusterResource.getMemorySize() / 3; - containerMemory = containerMemory - containerMemory % MIN_ALLOCATION_MB; - containerMemoryString = String.valueOf(containerMemory); + memVars[0] = clusterResource.getMemorySize() * 2 / 3; + memVars[0] = memVars[0] - memVars[0] % MIN_ALLOCATION_MB; + masterMemoryString = memVars[0] + "Mi"; + memVars[1] = clusterResource.getMemorySize() / 3; + memVars[1] = memVars[1] - memVars[1] % MIN_ALLOCATION_MB; + containerMemoryString = String.valueOf(memVars[1]); } - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "2", "--shell_command", @@ -1576,8 +1648,8 @@ public void doTestDistributedShellWithResources(boolean largeContainers) "--master_resources", "memory=" + masterMemoryString + ",vcores=1", "--container_resources", - "memory=" + containerMemoryString + ",vcores=1", - }; + "memory=" + containerMemoryString + ",vcores=1" + ); LOG.info("Initializing DS Client"); Client client = new Client(new Configuration(yarnCluster.getConfig())); @@ -1599,97 +1671,81 @@ public void run() { yarnClient.init(new Configuration(yarnCluster.getConfig())); yarnClient.start(); - while (true) { - List apps = yarnClient.getApplications(); - if (apps.isEmpty()) { - Thread.sleep(10); - continue; - } - ApplicationReport appReport = apps.get(0); - ApplicationId appId = appReport.getApplicationId(); - List appAttempts = - yarnClient.getApplicationAttempts(appId); - if (appAttempts.isEmpty()) { - Thread.sleep(10); - continue; - } - ApplicationAttemptReport appAttemptReport = appAttempts.get(0); - ContainerId amContainerId = appAttemptReport.getAMContainerId(); - - if (amContainerId == null) { - Thread.sleep(10); - continue; - } - ContainerReport report = yarnClient.getContainerReport(amContainerId); - Resource masterResource = report.getAllocatedResource(); - Assert.assertEquals(masterMemory, masterResource.getMemorySize()); - Assert.assertEquals(1, masterResource.getVirtualCores()); - - List containers = - yarnClient.getContainers(appAttemptReport.getApplicationAttemptId()); - if (containers.size() < 2) { - Thread.sleep(10); - continue; - } - for (ContainerReport container : containers) { - if (!container.getContainerId().equals(amContainerId)) { - Resource containerResource = container.getAllocatedResource(); - Assert.assertEquals(containerMemory, - containerResource.getMemorySize()); - Assert.assertEquals(1, containerResource.getVirtualCores()); + final AtomicBoolean testFailed = new AtomicBoolean(false); + try { + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean 
get() { + if (testFailed.get()) { + return true; + } + List containers; + try { + List apps = yarnClient.getApplications(); + if (apps.isEmpty()) { + return false; + } + ApplicationReport appReport = apps.get(0); + ApplicationId appId = appReport.getApplicationId(); + List appAttempts = + yarnClient.getApplicationAttempts(appId); + if (appAttempts.isEmpty()) { + return false; + } + ApplicationAttemptReport appAttemptReport = appAttempts.get(0); + ContainerId amContainerId = appAttemptReport.getAMContainerId(); + if (amContainerId == null) { + return false; + } + ContainerReport report = yarnClient.getContainerReport( + amContainerId); + Resource masterResource = report.getAllocatedResource(); + Assert.assertEquals(memVars[0], + masterResource.getMemorySize()); + Assert.assertEquals(1, masterResource.getVirtualCores()); + containers = yarnClient.getContainers( + appAttemptReport.getApplicationAttemptId()); + if (containers.size() < 2) { + return false; + } + for (ContainerReport container : containers) { + if (!container.getContainerId().equals(amContainerId)) { + Resource containerResource = container.getAllocatedResource(); + Assert.assertEquals(memVars[1], + containerResource.getMemorySize()); + Assert.assertEquals(1, containerResource.getVirtualCores()); + } + } + return true; + } catch (Exception ex) { + LOG.error("Error waiting for expected results", ex); + testFailed.set(true); + } + return false; } + }, 10, TEST_TIME_WINDOW_EXPIRE); + assertFalse(testFailed.get()); + } finally { + LOG.info("Signaling Client to Stop"); + client.sendStopSignal(); + if (yarnClient != null) { + LOG.info("Stopping yarnClient service"); + yarnClient.stop(); } - - return; } } - @Test(expected=IllegalArgumentException.class) - public void testDistributedShellAMResourcesWithIllegalArguments() - throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, - "--num_containers", - "1", - "--shell_command", - Shell.WINDOWS ? "dir" : "ls", - "--master_resources", - "memory-mb=invalid" - }; - Client client = new Client(new Configuration(yarnCluster.getConfig())); - client.init(args); - } - - @Test(expected=MissingArgumentException.class) - public void testDistributedShellAMResourcesWithMissingArgumentValue() - throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, - "--num_containers", - "1", - "--shell_command", - Shell.WINDOWS ? "dir" : "ls", - "--master_resources" - }; - Client client = new Client(new Configuration(yarnCluster.getConfig())); - client.init(args); - } - @Test(expected=ResourceNotFoundException.class) public void testDistributedShellAMResourcesWithUnknownResource() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", Shell.WINDOWS ? "dir" : "ls", "--master_resources", "unknown-resource=5" - }; + ); Client client = new Client(new Configuration(yarnCluster.getConfig())); client.init(args); client.run(); @@ -1698,16 +1754,14 @@ public void testDistributedShellAMResourcesWithUnknownResource() @Test(expected=IllegalArgumentException.class) public void testDistributedShellNonExistentQueue() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", Shell.WINDOWS ? 
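The resource test above folds the two memory locals into a single long[] memVars so the values can still be recomputed in the large-containers branch and yet be referenced from the anonymous Supplier that performs the polling assertions; only the (effectively final) array reference is captured. A tiny sketch of that capture trick; the class, main method, and numbers are illustrative.

import java.util.function.Supplier;

public class CapturedStateSketch {

  public static void main(String[] args) {
    final long[] memVars = {1024, 512};      // {master MB, container MB}
    long clusterMemory = 8192;

    // The elements may be reassigned before the anonymous class is created
    // without tripping the effectively-final rule, because only the array
    // reference is captured.
    memVars[0] = clusterMemory * 2 / 3;
    memVars[1] = clusterMemory / 3;

    Supplier<Boolean> check = new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // The mutated values remain visible inside the anonymous class.
        return memVars[0] + memVars[1] <= clusterMemory;
      }
    };
    System.out.println("allocation fits: " + check.get());
  }
}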
"dir" : "ls", "--queue", "non-existent-queue" - }; + ); Client client = new Client(new Configuration(yarnCluster.getConfig())); client.init(args); client.run(); @@ -1716,9 +1770,7 @@ public void testDistributedShellNonExistentQueue() @Test public void testDistributedShellWithSingleFileLocalization() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", @@ -1727,7 +1779,7 @@ public void testDistributedShellWithSingleFileLocalization() "./src/test/resources/a.txt", "--shell_args", "a.txt" - }; + ); Client client = new Client(new Configuration(yarnCluster.getConfig())); client.init(args); @@ -1737,9 +1789,7 @@ public void testDistributedShellWithSingleFileLocalization() @Test public void testDistributedShellWithMultiFileLocalization() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", @@ -1748,7 +1798,7 @@ public void testDistributedShellWithMultiFileLocalization() "./src/test/resources/a.txt,./src/test/resources/b.txt", "--shell_args", "a.txt b.txt" - }; + ); Client client = new Client(new Configuration(yarnCluster.getConfig())); client.init(args); @@ -1758,9 +1808,7 @@ public void testDistributedShellWithMultiFileLocalization() @Test(expected=UncheckedIOException.class) public void testDistributedShellWithNonExistentFileLocalization() throws Exception { - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", @@ -1769,11 +1817,11 @@ public void testDistributedShellWithNonExistentFileLocalization() "/non/existing/path/file.txt", "--shell_args", "file.txt" - }; + ); Client client = new Client(new Configuration(yarnCluster.getConfig())); client.init(args); - client.run(); + assertTrue(client.run()); } @@ -1781,34 +1829,34 @@ public void testDistributedShellWithNonExistentFileLocalization() public void testDistributedShellCleanup() throws Exception { String appName = "DistributedShellCleanup"; - String[] args = { - "--jar", - APPMASTER_JAR, + String[] args = createArguments( "--num_containers", "1", "--shell_command", - Shell.WINDOWS ? "dir" : "ls", - "--appname", - appName - }; + Shell.WINDOWS ? 
"dir" : "ls" + ); Configuration config = new Configuration(yarnCluster.getConfig()); Client client = new Client(config); - client.init(args); - client.run(); - ApplicationId appId = client.getAppId(); - String relativePath = - ApplicationMaster.getRelativePath(appName, appId.toString(), ""); - FileSystem fs1 = FileSystem.get(config); - Path path = new Path(fs1.getHomeDirectory(), relativePath); + try { + client.init(args); + client.run(); + ApplicationId appId = client.getAppId(); + String relativePath = + ApplicationMaster.getRelativePath(appName, appId.toString(), ""); + FileSystem fs1 = FileSystem.get(config); + Path path = new Path(fs1.getHomeDirectory(), relativePath); - GenericTestUtils.waitFor(() -> { - try { - return !fs1.exists(path); - } catch (IOException e) { - return false; - } - }, 10, 60000); + GenericTestUtils.waitFor(() -> { + try { + return !fs1.exists(path); + } catch (IOException e) { + return false; + } + }, 10, 60000); - assertFalse("Distributed Shell Cleanup failed", fs1.exists(path)); + assertFalse("Distributed Shell Cleanup failed", fs1.exists(path)); + } finally { + client.sendStopSignal(); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml index 28e4a5bc9ec95..0e3b46891b838 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/hadoop-yarn-applications-mawo-core/pom.xml @@ -15,7 +15,7 @@ hadoop-yarn-applications-mawo org.apache.hadoop.applications.mawo - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml index 5594a30fed505..9366d73755d38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-mawo/pom.xml @@ -15,7 +15,7 @@ hadoop-yarn-applications org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml index 1d2a23245d030..03b3f3afb194c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn-applications org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-applications-unmanaged-am-launcher - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Unmanaged Am Launcher diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml index 36364bf2a80f3..ab4b896a38701 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-yarn-services - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT hadoop-yarn-services-api Apache Hadoop YARN Services API @@ -152,8 +152,8 @@ commons-lang3 - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava com.sun.jersey diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java index 339fa4b5a45cc..0ccc149098c15 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java @@ -30,10 +30,10 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.UriBuilder; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.codec.binary.Base64; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java index db11f409d399d..f331da35670b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.yarn.service.client; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java index 88aeefd8738a7..94b3a01e3f2ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java @@ -17,9 +17,9 @@ package org.apache.hadoop.yarn.service.webapp; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Inject; import com.google.inject.Singleton; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java index 733b9bcffafe6..cf04e12d6d27d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java @@ -32,7 +32,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.service.api.records.Artifact; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java index 65849a6dddd5f..20b7f49a27d0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java @@ -26,7 +26,7 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml index e22139eb5e0b8..cbac6f2d86e50 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -19,7 +19,7 @@ org.apache.hadoop hadoop-yarn-services - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT hadoop-yarn-services-core jar @@ -124,8 +124,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + 
hadoop-shaded-guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java index 87791536492f5..eb999cb9a5b5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service; -import com.google.common.base.Preconditions; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; import org.apache.hadoop.yarn.service.api.records.Service; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java index 3c8fed68a1286..85f7d77e0f4e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java index 9ac1753880d47..670fc21f0dcc3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; import 
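The surrounding hunks all apply one mechanical change: direct Guava imports are replaced with Hadoop's relocated copy under org.apache.hadoop.thirdparty, and the poms swap com.google.guava:guava for org.apache.hadoop.thirdparty:hadoop-shaded-guava. A minimal illustration of what a migrated class looks like; the class and method are invented for the example, only the import line reflects the patch.

// Before the patch: import com.google.common.base.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

public final class ShadedGuavaUsageSketch {

  private ShadedGuavaUsageSketch() {
  }

  public static int requirePositive(int value) {
    // Same Guava API, resolved from the relocated (shaded) package so
    // downstream classpaths cannot clash with Hadoop's Guava version.
    Preconditions.checkArgument(value > 0, "value must be positive: %s", value);
    return value;
  }
}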
org.apache.commons.lang3.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java index 458a7a1c5c1e7..fa94491ec4c94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.service; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource.Builder; @@ -737,7 +737,7 @@ public void onContainersCompleted(List statuses) { LOG.warn( "Container {} Completed. No component instance exists. exitStatus={}. diagnostics={} ", containerId, status.getExitStatus(), status.getDiagnostics()); - return; + continue; } ComponentEvent event = new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java index cc726a851c27c..98f71268fbe9c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service.api.records; import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import com.google.gson.annotations.SerializedName; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index b3ac7bbe748eb..78db4b4e0dff1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.client; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java index cbc489c4e69ed..1f3ca22f08532 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service.component; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ExecutionType; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java index 7966ac2cf8c1d..9ecfd3c41dbbe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.component; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java index 700408e6633e5..2f75dc468322b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java @@ -20,7 +20,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java index 889da6e15ace0..932f97c4797ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.component.instance; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.event.AbstractEvent; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java index d1dda38eeb560..ffc0daa022210 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerRetryContext; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java index 5ed56e31c46d2..0f9aebe354370 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.service.utils.ServiceUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java index 1574d6df1ee64..ac79aa4e74f7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.Container; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java index bb47971bb7b8d..69e16281cd78e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.service.utils.ServiceUtils; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java index 0c1de58902e94..79d6773d0eca7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java @@ -20,7 +20,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -58,7 +58,7 @@ public class ClientAMProtocolPBClientImpl public ClientAMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ClientAMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ClientAMProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java index 96b24d2013b14..00e072ddb5bdb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.provider; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.service.api.records.Service; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java index 999a8dc9cb2c8..ac38b23fd5d64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java @@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class DefaultClientProvider extends AbstractClientProvider { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java index cecca5f6cf266..788e7e2281be0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.registry; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java index 86896b2b69426..ba9ebc2ffaa74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.api.RegistryOperations; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java index fe8cce85434d0..5d103a98855fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.service.exceptions.BadConfigException; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java index 0ee8e83980753..1026092f54bb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java index ac5c079b60e7b..11190ebb53588 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java @@ -30,12 +30,10 @@ import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSException; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; -import org.ietf.jgss.Oid; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,8 +70,6 @@ public static String generateToken(String server) throws @Override public String run() throws Exception { try { - // This Oid for Kerberos GSS-API mechanism. - Oid mechOid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID"); GSSManager manager = GSSManager.getInstance(); // GSS name for server GSSName serverName = manager.createName("HTTP@" + server, @@ -81,8 +77,9 @@ public String run() throws Exception { // Create a GSSContext for authentication with the service. // We're passing client credentials as null since we want them to // be read from the Subject. + // We're passing Oid as null to use the default. 
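Note on the HttpUtil.java hunk above: the patch drops the reflective KerberosUtil lookup of the Kerberos mechanism Oid and, as the new comment says, passes null so JGSS falls back to its default mechanism; the catch block that follows shrinks to GSSException accordingly. A minimal, self-contained sketch of that simplified call shape, for illustration only (the class and method names here are hypothetical and not part of the patch):

import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;

// Illustrative sketch: SPNEGO/Kerberos token generation using the default GSS mechanism.
final class DefaultMechTokenSketch {
  static byte[] initialToken(String server) throws GSSException {
    GSSManager manager = GSSManager.getInstance();
    // GSS name for the HTTP service principal on the target host.
    GSSName serverName = manager.createName("HTTP@" + server,
        GSSName.NT_HOSTBASED_SERVICE);
    // Passing null for the mechanism Oid (and null credentials) lets the provider
    // pick the default mechanism and read credentials from the current Subject,
    // so no reflective Oid lookup -- and none of its checked exceptions -- is needed.
    GSSContext context = manager.createContext(serverName.canonicalize(null),
        null, null, GSSContext.DEFAULT_LIFETIME);
    context.requestMutualAuth(true);
    context.requestCredDeleg(true);
    return context.initSecContext(new byte[0], 0, 0);
  }
}

The hunk resumes below with the createContext call that the comment describes.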
GSSContext gssContext = manager.createContext( - serverName.canonicalize(mechOid), mechOid, null, + serverName.canonicalize(null), null, null, GSSContext.DEFAULT_LIFETIME); gssContext.requestMutualAuth(true); gssContext.requestCredDeleg(true); @@ -95,9 +92,8 @@ public String run() throws Exception { LOG.debug("Got valid challenge for host {}", serverName); return new String(BASE_64_CODEC.encode(outToken), StandardCharsets.US_ASCII); - } catch (GSSException | IllegalAccessException - | NoSuchFieldException | ClassNotFoundException e) { - LOG.error("Error: {}", e); + } catch (GSSException e) { + LOG.error("Error: ", e); throw new AuthenticationException(e); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java index 76a2fbfe52dda..8e8c2ea929e00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.base.Charsets; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java index 3780c99c24f4c..1931d859cf087 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.service.utils; import com.fasterxml.jackson.databind.PropertyNamingStrategy; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.Multimap; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.fs.Path; @@ -250,7 +250,7 @@ public static void validateAndResolveService(Service service, public static void validateJvmOpts(String jvmOpts) throws IllegalArgumentException { - Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\"\\/,`;]"); + Pattern pattern = Pattern.compile("[!~#?@*&%${}()<>\\[\\]|\",`;]"); Matcher matcher = pattern.matcher(jvmOpts); if (matcher.find()) { throw new IllegalArgumentException( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java index 34d2ba3bb830a..2de2223cec133 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.lang3.ArrayUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java index 60eb9b4019f96..aa669878b5e78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.net.HostAndPort; +import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.service.exceptions.BadConfigException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockRunningServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockRunningServiceContext.java index c67c28909b7e4..4e3fc0999eeb3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockRunningServiceContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockRunningServiceContext.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service; -import com.google.common.util.concurrent.Futures; +import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.registry.client.api.RegistryOperations; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java index ce4f9dd2b9051..cf2b1f28920d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.registry.client.api.RegistryOperations; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java index e5c35275c9532..cd148ced48a86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java @@ -19,9 +19,9 @@ package org.apache.hadoop.yarn.service; import com.fasterxml.jackson.databind.PropertyNamingStrategy; -import com.google.common.base.Throwables; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.commons.io.FileUtils; import org.apache.curator.test.TestingCluster; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java index 304e74078e97d..4e773dc6f74de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestDefaultUpgradeComponentsFinder.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.service; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.yarn.service.api.records.Component; import org.apache.hadoop.yarn.service.api.records.ConfigFile; import org.apache.hadoop.yarn.service.api.records.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java index bbcbee246802c..fa96642affe6f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java @@ -18,26 +18,33 @@ package org.apache.hadoop.yarn.service; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.commons.io.FileUtils; import org.apache.curator.test.TestingCluster; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Event; +import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier; import org.apache.hadoop.yarn.service.api.records.Artifact; import org.apache.hadoop.yarn.service.api.records.Component; import org.apache.hadoop.yarn.service.api.records.ResourceInformation; import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; import org.apache.hadoop.yarn.service.component.ComponentState; import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState; @@ -47,7 +54,9 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +72,8 @@ import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; import static org.junit.Assert.assertEquals; +import static 
org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class TestServiceAM extends ServiceTestUtils{ @@ -72,6 +83,9 @@ public class TestServiceAM extends ServiceTestUtils{ private File basedir; YarnConfiguration conf = new YarnConfiguration(); TestingCluster zkCluster; + @Rule + public ServiceTestUtils.ServiceFSWatcher rule = + new ServiceTestUtils.ServiceFSWatcher(); @Before public void setup() throws Exception { @@ -311,6 +325,80 @@ public void testScheduleWithMultipleResourceTypes() am.stop(); } + @Test + public void testContainerCompletedEventProcessed() throws Exception { + ServiceContext context = createServiceContext("abc"); + MockServiceScheduler scheduler = new MockServiceScheduler(context); + scheduler.init(conf); + ApplicationId appId = ApplicationId.newInstance(0, 0); + ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, + 1); + ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 0); + ContainerStatus containerStatus1 = ContainerStatus.newInstance(containerId1, + org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, + "successful", 0); + ContainerId containerId2 = ContainerId.newContainerId(appAttemptId, 1); + ContainerStatus containerStatus2 = ContainerStatus.newInstance(containerId2, + org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, + "successful", 0); + ComponentInstance instance = Mockito.mock(ComponentInstance.class); + Mockito.doReturn("componentInstance").when(instance).getCompName(); + scheduler.addLiveCompInstance(containerId2, instance); + List statuses = new ArrayList<>(); + // First container instance will be null + statuses.add(containerStatus1); + // Second container instance is added + scheduler.addLiveCompInstance(containerId2, instance); + statuses.add(containerStatus2); + scheduler.callbackHandler.onContainersCompleted(statuses); + // For second container event should be dispatched. 
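Immediately after this point the new test verifies the mocked dispatcher. A tiny standalone sketch of the Mockito verify/times idiom it relies on, with a hypothetical Dispatcher interface standing in for the real AsyncDispatcher (assumption: only containers with a registered live instance trigger a dispatch, which is what the assertion below checks):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.Arrays;
import java.util.List;

class VerifyTimesSketch {
  // Hypothetical stand-in for the real AsyncDispatcher.
  interface Dispatcher {
    Object getEventHandler();
  }

  // Dispatch only for completed containers that have a registered live instance.
  static void onCompleted(Dispatcher dispatcher, List<String> live, List<String> completed) {
    for (String id : completed) {
      if (live.contains(id)) {
        dispatcher.getEventHandler();
      }
    }
  }

  void example() {
    Dispatcher dispatcher = mock(Dispatcher.class);
    onCompleted(dispatcher, Arrays.asList("c2"), Arrays.asList("c1", "c2"));
    // Exactly one dispatch: the unknown container ("c1") is ignored.
    verify(dispatcher, times(1)).getEventHandler();
  }
}

The test hunk continues below with the corresponding verify(scheduler.dispatcher, times(1)) assertion.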
+ verify(scheduler.dispatcher, times(1)).getEventHandler(); + DefaultMetricsSystem.shutdown(); + } + + private ServiceContext createServiceContext(String name) + throws Exception { + Artifact artifact = new Artifact(); + artifact.setId("1"); + artifact.setType(Artifact.TypeEnum.TARBALL); + Service serviceDef = ServiceTestUtils.createExampleApplication(); + ApplicationId applicationId = ApplicationId.newInstance( + System.currentTimeMillis(), 1); + serviceDef.setId(applicationId.toString()); + serviceDef.setName(name); + serviceDef.setState(ServiceState.STARTED); + serviceDef.getComponents().forEach(component -> + component.setArtifact(artifact)); + ServiceContext context = new MockRunningServiceContext(rule, + serviceDef); + context.scheduler.getDispatcher().setDrainEventsOnStop(); + context.scheduler.getDispatcher().start(); + return context; + } + + class MockServiceScheduler extends ServiceScheduler { + private AsyncDispatcher dispatcher; + private AMRMClientCallback callbackHandler = new AMRMClientCallback(); + + MockServiceScheduler(ServiceContext context) { + super(context); + } + + @Override + protected AsyncDispatcher createAsyncDispatcher() { + dispatcher = Mockito.mock(AsyncDispatcher.class); + EventHandler handler = Mockito.mock(EventHandler.class); + Mockito.doReturn(handler).when(dispatcher).getEventHandler(); + return dispatcher; + } + + @Override + protected AMRMClientAsync createAMRMClient() { + return AMRMClientAsync.createAMRMClientAsync(1000, callbackHandler); + } + + } + @Test public void testRecordTokensForContainers() throws Exception { ApplicationId applicationId = ApplicationId.newInstance(123456, 1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java index 4ede8cfbb93ba..ca1a8faf16c63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.service; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java index c66c4aedf89f7..af898a8f7fe86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.client; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java index 3b9ce4359a4d6..488c1038e0d99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.component.instance; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java index f0525aae2329b..01bec79f16de6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestAbstractProviderService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.provider; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java index a6be4b192165d..70c2b648bd8f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestFilterUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto; import org.apache.hadoop.yarn.service.MockRunningServiceContext; import org.apache.hadoop.yarn.service.ServiceContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java index a93f3d9279696..2a611717f2c4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java @@ -46,10 +46,12 @@ import java.util.List; import static org.assertj.core.api.Assertions.assertThat; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME; import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; /** * Test for ServiceApiUtil helper methods. 
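The imports added above bring LambdaTestUtils.intercept and Assert.fail into TestServiceApiUtil for the reworked testJvmOpts in the next hunk, which replaces @Test(expected = ...) with an explicit interception plus a positive case. A minimal sketch of that intercept idiom, using the same three-argument form the patch itself uses (exception class, expected message fragment, lambda); the validator here is a stand-in for illustration, not the real ServiceApiUtil method:

import static org.apache.hadoop.test.LambdaTestUtils.intercept;

public class InterceptIdiomSketch {
  // Stand-in validator: rejects backticks, loosely mirroring the shell-metacharacter check.
  static void validateOpts(String opts) {
    if (opts.contains("`")) {
      throw new IllegalArgumentException(
          "Invalid character in yarn.service.am.java.opts.");
    }
  }

  public void example() throws Exception {
    // Fails unless an IllegalArgumentException whose message contains the text is thrown.
    intercept(IllegalArgumentException.class,
        "Invalid character in yarn.service.am.java.opts.",
        () -> validateOpts("`ping -c 3 example.com`"));
    // Valid options must pass through without throwing.
    validateOpts("-Xmx768m -Djava.security.auth.login.config=/opt/hadoop/etc/jaas-zk.conf");
  }
}

This checks both the exception type and the message, which the old expected-exception annotation could not do, and keeps a non-throwing case alongside it.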
@@ -766,10 +768,19 @@ public void run() { Assert.assertTrue(thread.isAlive()); } - @Test(expected = IllegalArgumentException.class) - public void testJvmOpts() { - String jvmOpts = "`ping -c 3 example.com`"; - ServiceApiUtil.validateJvmOpts(jvmOpts); + @Test + public void testJvmOpts() throws Exception { + String invalidJvmOpts = "`ping -c 3 example.com`"; + intercept(IllegalArgumentException.class, + "Invalid character in yarn.service.am.java.opts.", + () -> ServiceApiUtil.validateJvmOpts(invalidJvmOpts)); + String validJvmOpts = "-Dyarn.service.am.java.opts=-Xmx768m " + + "-Djava.security.auth.login.config=/opt/hadoop/etc/jaas-zk.conf"; + try { + ServiceApiUtil.validateJvmOpts(validJvmOpts); + } catch (Exception ex) { + fail("Invalid character in yarn.service.am.java.opts."); + } } public static Service createExampleApplication() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml index 43ea41d69b67b..8525500351d0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml @@ -19,7 +19,7 @@ hadoop-yarn-applications org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-services diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml index cab01298782b3..3ef8b8e4b02f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-applications - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Applications pom diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index 5d431d1b50f13..da70e781b3434 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -17,10 +17,10 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT hadoop-yarn-client - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Client @@ -39,8 +39,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava commons-cli diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index 3840fa57450b2..19cbdd74a8d60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -25,7 +25,7 @@ import java.util.function.Supplier; import java.util.List; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -48,8 +48,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.resource.Resources; -import 
com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java index c2c262069f188..17e1072fb6ce6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java @@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.async.NMClientAsync; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * NMTokenCache manages NMTokens required for an Application Master diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java index 688c8436d509d..0b98feb73e036 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java @@ -55,8 +55,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java index 62e2d993e4d44..2c2fcf213dace 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java @@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.client.api.impl.NMClientImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * NMClientAsync handles communication with all the NodeManagers diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java index 922b185a1a2dd..05c62a4aee68e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java @@ -54,7 +54,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java index 9cd653bed4b7c..fa9e0bf4bb1ba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java @@ -59,8 +59,8 @@ import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java index 19a75460140ec..b6a0c591c9077 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.client.api.impl; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 84d67b4bace0f..dbaeaac750275 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -79,9 +79,9 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.RackResolver; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.util.resource.Resources; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java index ed544f2287da0..61b6f4ae3506f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.security.NMTokenIdentifier; import org.apache.hadoop.yarn.util.ConverterUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java index fcc48e24974b2..7854407da6871 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java @@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java index 4763b530bf9c3..321b63602f54e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory; import org.apache.hadoop.yarn.util.Records; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java index 14133ba4ecd1e..655adb589dd07 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java @@ -142,7 +142,7 @@ import org.eclipse.jetty.websocket.client.ClientUpgradeRequest; import org.eclipse.jetty.websocket.client.WebSocketClient; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index cfb3b2923d30d..8c43e74a7023d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -60,7 +60,7 @@ import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.Times; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import static org.apache.hadoop.yarn.util.StringHelper.getResourceSecondsString; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java index 7ead77495ee63..01f9d4f48da2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java @@ -42,7 +42,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Cluster CLI used to get over all information of the cluster diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java index 343dfc7bd2f84..6d2f0bbb417a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.client.cli; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.ClientRequest; @@ -105,6 +105,7 @@ public class LogsCLI extends Configured implements Tool { private static final String APP_OWNER_OPTION = "appOwner"; private static final String AM_CONTAINER_OPTION = "am"; private static final String PER_CONTAINER_LOG_FILES_OPTION = "log_files"; + private static final String PER_CONTAINER_LOG_FILES_OLD_OPTION = 
"logFiles"; private static final String PER_CONTAINER_LOG_FILES_REGEX_OPTION = "log_files_pattern"; private static final String LIST_NODES_OPTION = "list_nodes"; @@ -221,6 +222,12 @@ private int runCommand(String[] args) throws Exception { } if (commandLine.hasOption(PER_CONTAINER_LOG_FILES_OPTION)) { logFiles = commandLine.getOptionValues(PER_CONTAINER_LOG_FILES_OPTION); + } else { + // For backward compatibility, we need to check for the old form of this + // command line option as well. New form takes precedent. + if (commandLine.hasOption(PER_CONTAINER_LOG_FILES_OLD_OPTION)) { + logFiles = commandLine.getOptionValues(PER_CONTAINER_LOG_FILES_OLD_OPTION); + } } if (commandLine.hasOption(PER_CONTAINER_LOG_FILES_REGEX_OPTION)) { logFilesRegex = commandLine.getOptionValues( @@ -954,6 +961,12 @@ private Options createCommandOpts() { logFileOpt.setArgs(Option.UNLIMITED_VALUES); logFileOpt.setArgName("Log File Name"); opts.addOption(logFileOpt); + Option oldLogFileOpt = new Option(PER_CONTAINER_LOG_FILES_OLD_OPTION, true, + "Deprecated name for log_files, please use log_files option instead"); + oldLogFileOpt.setValueSeparator(','); + oldLogFileOpt.setArgs(Option.UNLIMITED_VALUES); + oldLogFileOpt.setArgName("Log File Name"); + opts.addOption(oldLogFileOpt); Option logFileRegexOpt = new Option(PER_CONTAINER_LOG_FILES_REGEX_OPTION, true, "Specify comma-separated value " + "to get matched log files by using java regex. Use \".*\" to " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java index d525087c22c09..45ea443c2bb7d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.client.cli; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.MissingArgumentException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java index 81b2d5c14d471..550b929d37ede 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java @@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.exceptions.YarnException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index 489509b849d1c..d165ebf8b42b2 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -81,8 +81,8 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import static org.apache.hadoop.yarn.client.util.YarnClientUtils.NO_LABEL_ERR_MSG; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java index 6460de9812c79..87ae27b304074 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.client.cli; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index 578a704d5b513..79b1406ed185c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -43,9 +43,9 @@ import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSocketFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java index 94b13a07d3f1b..50a1a0b6c7dc7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java @@ -17,21 +17,20 @@ */ package org.apache.hadoop.yarn.client.util; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.nio.charset.StandardCharsets; import 
java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -39,7 +38,6 @@ import org.ietf.jgss.GSSException; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; -import org.ietf.jgss.Oid; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -222,8 +220,6 @@ public static String generateToken(String server) throws IOException, @Override public String run() throws Exception { try { - // This Oid for Kerberos GSS-API mechanism. - Oid mechOid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID"); GSSManager manager = GSSManager.getInstance(); // GSS name for server GSSName serverName = manager.createName("HTTP@" + server, @@ -231,8 +227,9 @@ public String run() throws Exception { // Create a GSSContext for authentication with the service. // We're passing client credentials as null since we want them to // be read from the Subject. + // We're passing Oid as null to use the default. GSSContext gssContext = manager.createContext( - serverName.canonicalize(mechOid), mechOid, null, + serverName.canonicalize(null), null, null, GSSContext.DEFAULT_LIFETIME); gssContext.requestMutualAuth(true); gssContext.requestCredDeleg(true); @@ -245,8 +242,7 @@ public String run() throws Exception { LOG.debug("Got valid challenge for host {}", serverName); return new String(BASE_64_CODEC.encode(outToken), StandardCharsets.US_ASCII); - } catch (GSSException | IllegalAccessException - | NoSuchFieldException | ClassNotFoundException e) { + } catch (GSSException e) { LOG.error("Error: ", e); throw new AuthenticationException(e); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java index 25377f1672a02..6eae898734cee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java @@ -61,7 +61,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientWithReservation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientWithReservation.java index 06475b95167ae..073d7bc1c5be0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientWithReservation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClientWithReservation.java 
@@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.client.api.impl; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java index 85aca0547fe9c..2d005a1526411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java @@ -39,7 +39,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestClusterCLI { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java index 7966313b68978..a5549c57e46b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java @@ -36,7 +36,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.ClientResponse.Status; @@ -526,6 +526,48 @@ public ContainerReport getContainerReport(String containerIdStr) createEmptyLog("empty"))); sysOutStream.reset(); + // Check backward compatibility for -logFiles + exitCode = cli.run(new String[] {"-applicationId", appId.toString(), + "-logFiles", "stdout"}); + assertTrue("Failed with -logFiles", exitCode == 0); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId1, "syslog"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId2, "syslog"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId3, "syslog"))); + assertTrue("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId3, "stdout"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + logMessage(containerId3, "stdout1234"))); + assertFalse("Failed with -logFiles", sysOutStream.toString().contains( + createEmptyLog("empty"))); + sysOutStream.reset(); + + // Check -log_files supersedes -logFiles + exitCode = cli.run(new String[] {"-applicationId", appId.toString(), + "-log_files", "stdout", "-logFiles", "syslog"}); + assertTrue("Failed with -logFiles and -log_files", exitCode == 0); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId1, "syslog"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId2, "syslog"))); + assertFalse("Failed with -logFiles and -log_files", +
sysOutStream.toString().contains( + logMessage(containerId3, "syslog"))); + assertTrue("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId3, "stdout"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + logMessage(containerId3, "stdout1234"))); + assertFalse("Failed with -logFiles and -log_files", + sysOutStream.toString().contains( + createEmptyLog("empty"))); + sysOutStream.reset(); + exitCode = cli.run(new String[] {"-applicationId", appId.toString(), "-log_files_pattern", "std*"}); assertTrue(exitCode == 0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java index fb2e3cdaa3909..cab4bda76c477 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.client.cli; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse; @@ -60,8 +60,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** * Test class for TestNodeAttributesCLI. 
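Note on the recurring import change in the files above and below: direct com.google.common imports are replaced by the relocated copies under org.apache.hadoop.thirdparty.com.google.common, supplied by the hadoop-shaded-guava artifact (see the hadoop-yarn-common pom change later in this patch), so YARN no longer leaks an unshaded Guava onto downstream classpaths. A minimal sketch of what calling code looks like after the switch, assuming hadoop-shaded-guava is on the compile classpath; the class below is illustrative only and is not part of the patch.

import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;

/** Illustrative only: the shaded-Guava import pattern applied throughout this patch. */
public class ShadedGuavaExample {
  private final ImmutableSet<String> nodeLabels;

  public ShadedGuavaExample(ImmutableSet<String> nodeLabels) {
    // Same Guava API as before; only the package prefix changes.
    this.nodeLabels = Preconditions.checkNotNull(nodeLabels, "nodeLabels");
  }

  @VisibleForTesting
  ImmutableSet<String> getNodeLabels() {
    return nodeLabels;
  }
}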
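On the LogsCLI change earlier in this patch: -log_files stays the primary option, and the deprecated -logFiles spelling is consulted only when -log_files is absent, so the new form wins when both are given (which is what the TestLogsCLI additions above verify). A condensed sketch of that resolution order, assuming commons-cli has already parsed the arguments; the helper class and method names are illustrative and not part of LogsCLI.

import org.apache.commons.cli.CommandLine;

final class LogFileOptionResolver {
  // Option names as used by LogsCLI in this patch.
  private static final String NEW_OPT = "log_files";
  private static final String OLD_OPT = "logFiles";

  /** Returns the requested log file names, preferring the new option; null means all files. */
  static String[] resolve(CommandLine commandLine) {
    if (commandLine.hasOption(NEW_OPT)) {
      return commandLine.getOptionValues(NEW_OPT);
    }
    if (commandLine.hasOption(OLD_OPT)) {
      return commandLine.getOptionValues(OLD_OPT);
    }
    return null;
  }
}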
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java index 40b393ff3f38d..76035c2130ec2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java @@ -85,8 +85,8 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestRMAdminCLI { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java index 201b6d77d3cd2..4df20da68ccca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java @@ -66,6 +66,7 @@ import javax.servlet.http.HttpServletResponse; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -74,17 +75,10 @@ */ public class TestSchedConfCLI extends JerseyTestBase { - private ByteArrayOutputStream sysOutStream; - private PrintStream sysOut; - - private ByteArrayOutputStream sysErrStream; - private PrintStream sysErr; - private SchedConfCLI cli; private static MockRM rm; private static String userName; - private static CapacitySchedulerConfiguration csConf; private static final File CONF_FILE = new File(new File("target", "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE); @@ -101,14 +95,6 @@ public TestSchedConfCLI() { @Before public void setUp() { - sysOutStream = new ByteArrayOutputStream(); - sysOut = new PrintStream(sysOutStream); - System.setOut(sysOut); - - sysErrStream = new ByteArrayOutputStream(); - sysErr = new PrintStream(sysErrStream); - System.setErr(sysErr); - cli = new SchedConfCLI(); } @@ -131,8 +117,8 @@ protected void configureServlets() { + ioe.getMessage(), ioe); } - csConf = new CapacitySchedulerConfiguration(new Configuration(false), - false); + CapacitySchedulerConfiguration csConf = new + CapacitySchedulerConfiguration(new Configuration(false), false); setupQueueConfiguration(csConf); try { @@ -220,6 +206,9 @@ private void cleanUp() throws Exception { @Test(timeout = 10000) public void testGetSchedulerConf() throws Exception { + ByteArrayOutputStream sysOutStream = new ByteArrayOutputStream(); + PrintStream sysOut = new PrintStream(sysOutStream); + System.setOut(sysOut); try { super.setUp(); GuiceServletConfig.setInjector( @@ -268,51 +257,46 @@ public void testFormatSchedulerConf() throws Exception { @Test(timeout = 10000) public void testInvalidConf() throws Exception { + ByteArrayOutputStream sysErrStream = new ByteArrayOutputStream(); + PrintStream sysErr = new PrintStream(sysErrStream); + System.setErr(sysErr); + // conf pair with no key should be invalid - int 
exitCode = cli.run(new String[] {"-add", "root.a:=confVal"}); - assertTrue("Should return an error code", exitCode != 0); - assertTrue(sysErrStream.toString().contains("Specify configuration key " + - "value as confKey=confVal.")); - exitCode = cli.run(new String[] {"-update", "root.a:=confVal"}); - assertTrue("Should return an error code", exitCode != 0); - assertTrue(sysErrStream.toString().contains("Specify configuration key " + - "value as confKey=confVal.")); - - exitCode = cli.run(new String[] {"-add", "root.a:confKey=confVal=conf"}); - assertTrue("Should return an error code", exitCode != 0); - assertTrue(sysErrStream.toString().contains("Specify configuration key " + - "value as confKey=confVal.")); - exitCode = cli.run(new String[] {"-update", "root.a:confKey=confVal=c"}); - assertTrue("Should return an error code", exitCode != 0); - assertTrue(sysErrStream.toString().contains("Specify configuration key " + - "value as confKey=confVal.")); + executeCommand(sysErrStream, "-add", "root.a:=confVal"); + executeCommand(sysErrStream, "-update", "root.a:=confVal"); + executeCommand(sysErrStream, "-add", "root.a:confKey=confVal=conf"); + executeCommand(sysErrStream, "-update", "root.a:confKey=confVal=c"); + } + + private void executeCommand(ByteArrayOutputStream sysErrStream, String op, + String queueConf) throws Exception { + int exitCode = cli.run(new String[] {op, queueConf}); + assertNotEquals("Should return an error code", 0, exitCode); + assertTrue(sysErrStream.toString() + .contains("Specify configuration key " + "value as confKey=confVal.")); } @Test(timeout = 10000) public void testAddQueues() { SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo(); cli.addQueues("root.a:a1=aVal1,a2=aVal2,a3=", schedUpdateInfo); - QueueConfigInfo addInfo = schedUpdateInfo.getAddQueueInfo().get(0); - assertEquals("root.a", addInfo.getQueue()); - Map params = addInfo.getParams(); - assertEquals(3, params.size()); - assertEquals("aVal1", params.get("a1")); - assertEquals("aVal2", params.get("a2")); - assertNull(params.get("a3")); + Map paramValues = new HashMap<>(); + List addQueueInfo = schedUpdateInfo.getAddQueueInfo(); + paramValues.put("a1", "aVal1"); + paramValues.put("a2", "aVal2"); + paramValues.put("a3", null); + validateQueueConfigInfo(addQueueInfo, 0, "root.a", paramValues); schedUpdateInfo = new SchedConfUpdateInfo(); cli.addQueues("root.b:b1=bVal1;root.c:c1=cVal1", schedUpdateInfo); - assertEquals(2, schedUpdateInfo.getAddQueueInfo().size()); - QueueConfigInfo bAddInfo = schedUpdateInfo.getAddQueueInfo().get(0); - assertEquals("root.b", bAddInfo.getQueue()); - Map bParams = bAddInfo.getParams(); - assertEquals(1, bParams.size()); - assertEquals("bVal1", bParams.get("b1")); - QueueConfigInfo cAddInfo = schedUpdateInfo.getAddQueueInfo().get(1); - assertEquals("root.c", cAddInfo.getQueue()); - Map cParams = cAddInfo.getParams(); - assertEquals(1, cParams.size()); - assertEquals("cVal1", cParams.get("c1")); + addQueueInfo = schedUpdateInfo.getAddQueueInfo(); + assertEquals(2, addQueueInfo.size()); + paramValues.clear(); + paramValues.put("b1", "bVal1"); + validateQueueConfigInfo(addQueueInfo, 0, "root.b", paramValues); + paramValues.clear(); + paramValues.put("c1", "cVal1"); + validateQueueConfigInfo(addQueueInfo, 1, "root.c", paramValues); } @Test(timeout = 10000) @@ -320,12 +304,11 @@ public void testAddQueuesWithCommaInValue() { SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo(); cli.addQueues("root.a:a1=a1Val1\\,a1Val2 a1Val3,a2=a2Val1\\,a2Val2", schedUpdateInfo); 
- QueueConfigInfo addInfo = schedUpdateInfo.getAddQueueInfo().get(0); - assertEquals("root.a", addInfo.getQueue()); - Map params = addInfo.getParams(); - assertEquals(2, params.size()); - assertEquals("a1Val1,a1Val2 a1Val3", params.get("a1")); - assertEquals("a2Val1,a2Val2", params.get("a2")); + List addQueueInfo = schedUpdateInfo.getAddQueueInfo(); + Map params = new HashMap<>(); + params.put("a1", "a1Val1,a1Val2 a1Val3"); + params.put("a2", "a2Val1,a2Val2"); + validateQueueConfigInfo(addQueueInfo, 0, "root.a", params); } @Test(timeout = 10000) @@ -342,28 +325,35 @@ public void testRemoveQueues() { @Test(timeout = 10000) public void testUpdateQueues() { SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo(); + Map paramValues = new HashMap<>(); cli.updateQueues("root.a:a1=aVal1,a2=aVal2,a3=", schedUpdateInfo); - QueueConfigInfo updateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0); - assertEquals("root.a", updateInfo.getQueue()); - Map params = updateInfo.getParams(); - assertEquals(3, params.size()); - assertEquals("aVal1", params.get("a1")); - assertEquals("aVal2", params.get("a2")); - assertNull(params.get("a3")); + List updateQueueInfo = schedUpdateInfo + .getUpdateQueueInfo(); + paramValues.put("a1", "aVal1"); + paramValues.put("a2", "aVal2"); + paramValues.put("a3", null); + validateQueueConfigInfo(updateQueueInfo, 0, "root.a", paramValues); schedUpdateInfo = new SchedConfUpdateInfo(); cli.updateQueues("root.b:b1=bVal1;root.c:c1=cVal1", schedUpdateInfo); - assertEquals(2, schedUpdateInfo.getUpdateQueueInfo().size()); - QueueConfigInfo bUpdateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0); - assertEquals("root.b", bUpdateInfo.getQueue()); - Map bParams = bUpdateInfo.getParams(); - assertEquals(1, bParams.size()); - assertEquals("bVal1", bParams.get("b1")); - QueueConfigInfo cUpdateInfo = schedUpdateInfo.getUpdateQueueInfo().get(1); - assertEquals("root.c", cUpdateInfo.getQueue()); - Map cParams = cUpdateInfo.getParams(); - assertEquals(1, cParams.size()); - assertEquals("cVal1", cParams.get("c1")); + updateQueueInfo = schedUpdateInfo.getUpdateQueueInfo(); + assertEquals(2, updateQueueInfo.size()); + paramValues.clear(); + paramValues.put("b1", "bVal1"); + validateQueueConfigInfo(updateQueueInfo, 0, "root.b", paramValues); + paramValues.clear(); + paramValues.put("c1", "cVal1"); + validateQueueConfigInfo(updateQueueInfo, 1, "root.c", paramValues); + } + + private void validateQueueConfigInfo( + List updateQueueInfo, int index, String queuename, + Map paramValues) { + QueueConfigInfo updateInfo = updateQueueInfo.get(index); + assertEquals(queuename, updateInfo.getQueue()); + Map params = updateInfo.getParams(); + assertEquals(paramValues.size(), params.size()); + paramValues.forEach((k, v) -> assertEquals(v, params.get(k))); } @Test(timeout = 10000) @@ -371,12 +361,12 @@ public void testUpdateQueuesWithCommaInValue() { SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo(); cli.updateQueues("root.a:a1=a1Val1\\,a1Val2 a1Val3,a2=a2Val1\\,a2Val2", schedUpdateInfo); - QueueConfigInfo updateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0); - assertEquals("root.a", updateInfo.getQueue()); - Map params = updateInfo.getParams(); - assertEquals(2, params.size()); - assertEquals("a1Val1,a1Val2 a1Val3", params.get("a1")); - assertEquals("a2Val1,a2Val2", params.get("a2")); + List updateQueueInfo = schedUpdateInfo + .getUpdateQueueInfo(); + Map paramValues = new HashMap<>(); + paramValues.put("a1", "a1Val1,a1Val2 a1Val3"); + paramValues.put("a2", "a2Val1,a2Val2"); + 
validateQueueConfigInfo(updateQueueInfo, 0, "root.a", paramValues); } @Test(timeout = 10000) @@ -384,10 +374,10 @@ public void testGlobalUpdate() { SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo(); cli.globalUpdates("schedKey1=schedVal1,schedKey2=schedVal2", schedUpdateInfo); - Map globalInfo = schedUpdateInfo.getGlobalParams(); - assertEquals(2, globalInfo.size()); - assertEquals("schedVal1", globalInfo.get("schedKey1")); - assertEquals("schedVal2", globalInfo.get("schedKey2")); + Map paramValues = new HashMap<>(); + paramValues.put("schedKey1", "schedVal1"); + paramValues.put("schedKey2", "schedVal2"); + validateGlobalParams(schedUpdateInfo, paramValues); } @Test(timeout = 10000) @@ -396,10 +386,16 @@ public void testGlobalUpdateWithCommaInValue() { cli.globalUpdates( "schedKey1=schedVal1.1\\,schedVal1.2 schedVal1.3,schedKey2=schedVal2", schedUpdateInfo); + Map paramValues = new HashMap<>(); + paramValues.put("schedKey1", "schedVal1.1,schedVal1.2 schedVal1.3"); + paramValues.put("schedKey2", "schedVal2"); + validateGlobalParams(schedUpdateInfo, paramValues); + } + + private void validateGlobalParams(SchedConfUpdateInfo schedUpdateInfo, + Map paramValues) { Map globalInfo = schedUpdateInfo.getGlobalParams(); - assertEquals(2, globalInfo.size()); - assertEquals("schedVal1.1,schedVal1.2 schedVal1.3", - globalInfo.get("schedKey1")); - assertEquals("schedVal2", globalInfo.get("schedKey2")); + assertEquals(paramValues.size(), globalInfo.size()); + paramValues.forEach((k, v) -> assertEquals(v, globalInfo.get(k))); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index aba1ca443bbdb..861a51dec7ab0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -100,8 +100,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestYarnCLI { private static final Logger LOG = LoggerFactory.getLogger(TestYarnCLI.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index 4590c65a43a9a..19dde39245d28 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-common - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Common @@ -45,7 +45,6 @@ hadoop-hdfs-client - org.apache.hadoop @@ -86,8 +85,8 @@ jersey-client - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava commons-cli diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java index 8c8d7f17feac7..70f0ed77041ad 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; @@ -207,7 +207,7 @@ public class ApplicationClientProtocolPBClientImpl implements ApplicationClientP public ApplicationClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ApplicationClientProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ApplicationClientProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java index ceace111ac1d6..c488164ceb4e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationHistoryProtocolPBClientImpl.java @@ -23,12 +23,11 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; @@ -86,7 +85,7 @@ public class ApplicationHistoryProtocolPBClientImpl implements public ApplicationHistoryProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ApplicationHistoryProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ApplicationHistoryProtocolPB.class, clientVersion, addr, conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java index b4a20af6b605c..4525a001024d0 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationMasterProtocolPBClientImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB; @@ -55,7 +55,8 @@ public class ApplicationMasterProtocolPBClientImpl implements ApplicationMasterP public ApplicationMasterProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, + ProtobufRpcEngine2.class); proxy = (ApplicationMasterProtocolPB) RPC.getProxy(ApplicationMasterProtocolPB.class, clientVersion, addr, conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java index a1c2d5b86ef8a..7ee70e51f48e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientSCMProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.ClientSCMProtocol; import org.apache.hadoop.yarn.api.ClientSCMProtocolPB; @@ -50,7 +50,7 @@ public class ClientSCMProtocolPBClientImpl implements ClientSCMProtocol, public ClientSCMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ClientSCMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(ClientSCMProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java index d5c191103c282..86fc398f2520e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java @@ -21,7 +21,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import 
org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -106,7 +106,7 @@ public class ContainerManagementProtocolPBClientImpl implements ContainerManagem public ContainerManagementProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); int expireIntvl = conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java index 2ab36558f44a1..9aff674ef32c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/CsiAdaptorProtocolPBClientImpl.java @@ -19,7 +19,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.CsiAdaptorPB; import org.apache.hadoop.yarn.api.CsiAdaptorProtocol; @@ -57,7 +57,7 @@ public class CsiAdaptorProtocolPBClientImpl public CsiAdaptorProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, CsiAdaptorPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, CsiAdaptorPB.class, ProtobufRpcEngine2.class); this.proxy = RPC.getProxy(CsiAdaptorPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index 73d9287aa75a1..32b576aeec176 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -22,7 +22,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.commons.lang3.Range; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -35,8 +34,6 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder; -import com.google.common.base.Function; -import com.google.common.collect.Iterables; import org.apache.hadoop.thirdparty.protobuf.TextFormat; @Private @@ -88,13 +85,8 @@ private void mergeLocalToBuilder() { } if (applicationStates != null && !applicationStates.isEmpty()) { builder.clearApplicationStates(); - 
builder.addAllApplicationStates(Iterables.transform(applicationStates, - new Function() { - @Override - public YarnApplicationStateProto apply(YarnApplicationState input) { - return ProtoUtils.convertToProtoFormat(input); - } - })); + applicationStates.forEach(input -> + builder.addApplicationStates(ProtoUtils.convertToProtoFormat(input))); } if (applicationTags != null && !applicationTags.isEmpty()) { builder.clearApplicationTags(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java index 26a63894542d2..20732aa38010c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java @@ -37,7 +37,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProtoOrBuilder; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class GetNodesToLabelsResponsePBImpl extends GetNodesToLabelsResponse { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java index 0d1c2e52f006c..24661e2054701 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoRequest; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java index 141fd6d5c89d7..ece903f78936a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoResponse; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java index a512c6ae01ff9..82bb6e314393c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.protobuf.TextFormat; import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeRequest; import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java index cbdf91fd50a6b..43c4ada4f0f4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeResponse; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java index 23ab073521135..46a59b46fd739 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.TextFormat; import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeRequest; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java index 8406e419e52bc..8b170ea88e072 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeResponse; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java index bf3f4f55b24f9..09aabe05ffa5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java index aa33ab7529b11..2d5421d7d0775 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesResponse; import org.apache.hadoop.yarn.proto.CsiAdaptorProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java index 521d9ccfeabd7..607d83245e7c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java @@ -25,7 +25,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java index df038e5f0e7c3..1141ef31834bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 52feddf336304..5f9a71bd442b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -39,7 +39,6 @@ import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProtoOrBuilder; @@ -277,24 +276,6 @@ public synchronized void setApplicationType(String applicationType) { builder.setApplicationType((applicationType)); } - private void checkTags(Set tags) { - if (tags.size() > YarnConfiguration.APPLICATION_MAX_TAGS) { - throw new IllegalArgumentException("Too many applicationTags, a maximum of only " - + YarnConfiguration.APPLICATION_MAX_TAGS + " are allowed!"); - } - for (String tag : tags) { - if (tag.length() > YarnConfiguration.APPLICATION_MAX_TAG_LENGTH) { - throw new IllegalArgumentException("Tag " + tag + " is too long, " + - "maximum allowed length of a tag is " + - YarnConfiguration.APPLICATION_MAX_TAG_LENGTH); - } - if (!org.apache.commons.lang3.StringUtils.isAsciiPrintable(tag)) { - throw new IllegalArgumentException("A tag can only have ASCII " + - "characters! 
Invalid tag - " + tag); - } - } - } - @Override public synchronized void setApplicationTags(Set tags) { maybeInitBuilder(); @@ -303,7 +284,6 @@ public synchronized void setApplicationTags(Set tags) { this.applicationTags = null; return; } - checkTags(tags); // Convert applicationTags to lower case and add this.applicationTags = new TreeSet<>(); for (String tag : tags) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java index 298f0920476b5..2a0f0eef8f3fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java @@ -25,7 +25,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java index b3dad0a0165a1..53d52d3c94a23 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java @@ -68,7 +68,7 @@ public ContainerPBImpl(ContainerProto proto) { viaProto = true; } - public ContainerProto getProto() { + synchronized public ContainerProto getProto() { mergeLocalToProto(); proto = viaProto ? proto : builder.build(); @@ -142,7 +142,7 @@ private void maybeInitBuilder() { } @Override - public ContainerId getId() { + synchronized public ContainerId getId() { ContainerProtoOrBuilder p = viaProto ? 
proto : builder; if (this.containerId != null) { return this.containerId; @@ -176,7 +176,7 @@ public NodeId getNodeId() { } @Override - public void setId(ContainerId id) { + synchronized public void setId(ContainerId id) { maybeInitBuilder(); if (id == null) builder.clearId(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java index 4c725faed8a30..95bc29b2e5c0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java index 455ca24405f08..cdeb417243e37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java @@ -96,8 +96,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.LocalizationStateProto; import org.apache.hadoop.yarn.server.api.ContainerType; -import com.google.common.collect.Interner; -import com.google.common.collect.Interners; +import org.apache.hadoop.thirdparty.com.google.common.collect.Interner; +import org.apache.hadoop.thirdparty.com.google.common.collect.Interners; import org.apache.hadoop.thirdparty.protobuf.ByteString; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java index 609d4ab1ba725..a87c7d90e45fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java @@ -23,7 +23,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.proto.YarnProtos.ReservationIdProto; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java index f8e24ad8276d5..35fe945a07fab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProtoOrBuilder; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; public class ResourceOptionPBImpl extends ResourceOption { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index ed9f3dbb8cd8a..75f87541603b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.api.records.impl.pb; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java index 23eb29a494524..c69a9df561bb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java @@ -41,8 +41,8 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @InterfaceAudience.Public @InterfaceStability.Stable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java index bda3c75501f66..dc05f7f118664 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java @@ -24,6 +24,7 @@ import java.net.InetSocketAddress; import java.net.NoRouteToHostException; import java.net.SocketException; +import java.net.SocketTimeoutException; import java.net.UnknownHostException; import java.security.PrivilegedAction; import java.util.HashMap; @@ -49,7 +50,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.YarnRPC; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Public 
@InterfaceStability.Evolving @@ -294,6 +295,7 @@ protected static RetryPolicy createRetryPolicy(Configuration conf, exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy); exceptionToPolicyMap.put(RetriableException.class, retryPolicy); exceptionToPolicyMap.put(SocketException.class, retryPolicy); + exceptionToPolicyMap.put(SocketTimeoutException.class, retryPolicy); exceptionToPolicyMap.put(StandbyException.class, retryPolicy); // YARN-4288: local IOException is also possible. exceptionToPolicyMap.put(IOException.class, retryPolicy); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java index 6188d6ac259e7..b7775b6fbd59d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java @@ -41,7 +41,7 @@ import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException; import org.apache.hadoop.yarn.ipc.YarnRPC; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; @Public @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java index 7eb4ec129c766..70abfdeef1bf5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java @@ -51,7 +51,7 @@ import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.Client; @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java index ca0f307cdb4e6..3df1338b33945 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.client.api.impl; import java.io.IOException; +import java.io.InterruptedIOException; import java.lang.reflect.UndeclaredThrowableException; import java.net.ConnectException; import java.net.HttpURLConnection; @@ -52,9 +53,9 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.ClientRequest; @@ -352,7 +353,8 @@ public Object retryOn(TimelineClientRetryOp op) // sleep for the given time interval Thread.sleep(retryInterval); } catch (InterruptedException ie) { - LOG.warn("Client retry sleep interrupted! "); + Thread.currentThread().interrupt(); + throw new InterruptedIOException("Client retry sleep interrupted!"); } } throw new RuntimeException("Failed to connect to timeline server. " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java index 475864e1c08cb..d5af8a0df5692 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.client.api.impl; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.core.util.MultivaluedMapImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java index 65369568feff6..ce56725dc01d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java @@ -55,7 +55,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.UniformInterfaceException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java index 1f4f2015dbbe4..26c0877207eed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java @@ -40,7 +40,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import 
com.sun.jersey.api.client.WebResource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index 15168e9041bdf..f9deab06ff27f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -20,11 +20,11 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +38,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Dispatches {@link Event}s in a separate thread. Currently only single thread @@ -268,11 +268,16 @@ public EventHandler getEventHandler() { } class GenericEventHandler implements EventHandler { - private void printEventQueueDetails(BlockingQueue queue) { - Map counterMap = eventQueue.stream(). - collect(Collectors. - groupingBy(e -> e.getType(), Collectors.counting()) - ); + private void printEventQueueDetails() { + Iterator iterator = eventQueue.iterator(); + Map counterMap = new HashMap<>(); + while (iterator.hasNext()) { + Enum eventType = iterator.next().getType(); + if (!counterMap.containsKey(eventType)) { + counterMap.put(eventType, 0L); + } + counterMap.put(eventType, counterMap.get(eventType) + 1); + } for (Map.Entry entry : counterMap.entrySet()) { long num = entry.getValue(); LOG.info("Event type: " + entry.getKey() @@ -295,7 +300,7 @@ public void handle(Event event) { if (qSize != 0 && qSize % detailsInterval == 0 && lastEventDetailsQueueSizeLogged != qSize) { lastEventDetailsQueueSizeLogged = qSize; - printEventQueueDetails(eventQueue); + printEventQueueDetails(); printTrigger = true; } int remCapacity = eventQueue.remainingCapacity(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index ccd8e2e22b82d..0969e999718e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.event; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java index 7b48d5f8a721f..17571ed03d297 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java @@ -30,7 +30,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.token.SecretManager; @@ -165,7 +165,7 @@ private String getPackageName(Class clazz) { private Server createServer(Class pbProtocol, InetSocketAddress addr, Configuration conf, SecretManager secretManager, int numHandlers, BlockingService blockingService, String portRangeConfig) throws IOException { - RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine2.class); RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol) .setInstance(blockingService).setBindAddress(addr.getHostName()) .setPort(addr.getPort()).setNumHandlers(numHandlers).setVerbose(false) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java index a7774f6bd810f..3eda944c29c7d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java @@ -45,7 +45,7 @@ import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A service that periodically deletes aggregated logs. 
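A minimal standalone sketch of the counting logic introduced in the AsyncDispatcher hunk above, which drops the Stream groupingBy/counting pipeline from printEventQueueDetails() in favour of a plain iterator over the event queue and a HashMap of per-type tallies. The class and event names below (EventQueueCounterSketch, DemoEvent, DemoEventType, countByType) are hypothetical stand-ins for YARN's real Event/EventHandler types, and Map.merge is used as a shorthand for the patch's containsKey/put sequence; this is an illustration under those assumptions, not the Hadoop class itself.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class EventQueueCounterSketch {

  // Hypothetical event types standing in for YARN's real event enums.
  enum DemoEventType { NODE_UPDATE, APP_ATTEMPT_ADDED }

  // Hypothetical event carrying a type, standing in for org.apache.hadoop.yarn.event.Event.
  static final class DemoEvent {
    private final DemoEventType type;
    DemoEvent(DemoEventType type) { this.type = type; }
    DemoEventType getType() { return type; }
  }

  // Same shape as the reworked printEventQueueDetails(): walk the queue's
  // weakly consistent iterator and tally events per type without streams.
  static Map<Enum<?>, Long> countByType(BlockingQueue<DemoEvent> queue) {
    Map<Enum<?>, Long> counterMap = new HashMap<>();
    Iterator<DemoEvent> iterator = queue.iterator();
    while (iterator.hasNext()) {
      Enum<?> eventType = iterator.next().getType();
      counterMap.merge(eventType, 1L, Long::sum);
    }
    return counterMap;
  }

  public static void main(String[] args) {
    BlockingQueue<DemoEvent> queue = new LinkedBlockingQueue<>();
    queue.add(new DemoEvent(DemoEventType.NODE_UPDATE));
    queue.add(new DemoEvent(DemoEventType.NODE_UPDATE));
    queue.add(new DemoEvent(DemoEventType.APP_ATTEMPT_ADDED));
    countByType(queue).forEach((type, num) ->
        System.out.println("Event type: " + type + ", Event count: " + num));
  }
}

In the patch itself the tally still feeds the existing LOG.info loop, so the logged output is unchanged; only the stream machinery and the now-unused java.util.stream.Collectors import are removed from the dispatcher's queue-size logging path.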
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java index ca43fe6ad9648..0fa9764b7bb1f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java @@ -73,10 +73,9 @@ import org.apache.hadoop.yarn.util.Times; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @Public @Evolving @@ -355,14 +354,9 @@ private Set getFileCandidates(Set candidates, : this.logAggregationContext.getRolledLogsExcludePattern(), candidates, true); - Iterable mask = - Iterables.filter(candidates, new Predicate() { - @Override - public boolean apply(File next) { - return !alreadyUploadedLogFiles - .contains(getLogFileMetaData(next)); - } - }); + Iterable mask = Iterables.filter(candidates, (input) -> + !alreadyUploadedLogFiles + .contains(getLogFileMetaData(input))); return Sets.newHashSet(mask); } @@ -579,13 +573,17 @@ public static class LogReader { public LogReader(Configuration conf, Path remoteAppLogFile) throws IOException { - FileContext fileContext = - FileContext.getFileContext(remoteAppLogFile.toUri(), conf); - this.fsDataIStream = fileContext.open(remoteAppLogFile); - reader = - new TFile.Reader(this.fsDataIStream, fileContext.getFileStatus( - remoteAppLogFile).getLen(), conf); - this.scanner = reader.createScanner(); + try { + FileContext fileContext = + FileContext.getFileContext(remoteAppLogFile.toUri(), conf); + this.fsDataIStream = fileContext.open(remoteAppLogFile); + reader = new TFile.Reader(this.fsDataIStream, + fileContext.getFileStatus(remoteAppLogFile).getLen(), conf); + this.scanner = reader.createScanner(); + } catch (IOException ioe) { + close(); + throw new IOException("Error in creating LogReader", ioe); + } } private boolean atBeginning = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java index b51be9af14d2e..5f9466f38641c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java @@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java index 385ad094aa908..dd0279699e171 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java @@ -41,7 +41,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class LogCLIHelpers implements Configurable { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java index ab6eb613209c3..cf305babea6ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java @@ -18,10 +18,8 @@ package org.apache.hadoop.yarn.logaggregation.filecontroller; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; @@ -35,7 +33,7 @@ import java.util.List; import java.util.Map; import java.util.Set; - +import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -532,17 +530,12 @@ protected void cleanOldLogs(Path remoteNodeLogFileForApp, Set status = new HashSet(Arrays.asList(remoteFS.listStatus(appDir))); - Iterable mask = - Iterables.filter(status, new Predicate() { - @Override - public boolean apply(FileStatus next) { - return next.getPath().getName() - .contains(LogAggregationUtils.getNodeString(nodeId)) - && !next.getPath().getName().endsWith( - LogAggregationUtils.TMP_FILE_SUFFIX); - } - }); - status = Sets.newHashSet(mask); + status = status.stream().filter( + next -> next.getPath().getName() + .contains(LogAggregationUtils.getNodeString(nodeId)) + && !next.getPath().getName().endsWith( + LogAggregationUtils.TMP_FILE_SUFFIX)).collect( + Collectors.toSet()); // Normally, we just need to delete one oldest log // before we upload a new log. 
// If we can not delete the older logs in this cycle, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java index c653691ff8226..e8b730673904d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.logaggregation.filecontroller; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java index e94a92a6e20dc..8047f4a519bcb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java @@ -18,10 +18,8 @@ package org.apache.hadoop.yarn.logaggregation.filecontroller.ifile; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -43,6 +41,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.stream.Collectors; import org.apache.commons.lang3.SerializationUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -706,16 +705,12 @@ public int compare(ContainerLogMeta o1, ContainerLogMeta o2) { public Map parseCheckSumFiles( List fileList) throws IOException { Map checkSumFiles = new HashMap<>(); - Set status = new HashSet(fileList); - Iterable mask = - Iterables.filter(status, new Predicate() { - @Override - public boolean apply(FileStatus next) { - return next.getPath().getName().endsWith( - CHECK_SUM_FILE_SUFFIX); - } - }); - status = Sets.newHashSet(mask); + Set status = + new HashSet<>(fileList).stream().filter( + next -> next.getPath().getName().endsWith( + CHECK_SUM_FILE_SUFFIX)).collect( + Collectors.toSet()); + FileContext fc = null; for (FileStatus file : status) { FSDataInputStream checksumFileInputStream = null; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsCustomResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetricValue.java similarity index 81% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsCustomResource.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetricValue.java index 2cd9bf2153ccb..e5482920f4dfb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsCustomResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetricValue.java @@ -14,9 +14,9 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +package org.apache.hadoop.yarn.metrics; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.util.resource.ResourceUtils; @@ -29,26 +29,26 @@ * the name of the custom resource. * There are different kinds of values like allocated, available and others. */ -public class QueueMetricsCustomResource { +public class CustomResourceMetricValue { private final Map values = Maps.newHashMap(); - protected void increase(Resource res) { + public void increase(Resource res) { update(res, Long::sum); } - void increaseWithMultiplier(Resource res, long multiplier) { + public void increaseWithMultiplier(Resource res, long multiplier) { update(res, (v1, v2) -> v1 + v2 * multiplier); } - protected void decrease(Resource res) { + public void decrease(Resource res) { update(res, (v1, v2) -> v1 - v2); } - void decreaseWithMultiplier(Resource res, int containers) { + public void decreaseWithMultiplier(Resource res, int containers) { update(res, (v1, v2) -> v1 - v2 * containers); } - protected void set(Resource res) { + public void set(Resource res) { update(res, (v1, v2) -> v2); } @@ -64,8 +64,7 @@ private void update(Resource res, BiFunction operation) { if (!values.containsKey(resource.getName())) { values.put(resource.getName(), 0L); } - values.merge(resource.getName(), - resource.getValue(), operation); + values.merge(resource.getName(), resource.getValue(), operation); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java new file mode 100644 index 0000000000000..926f8520604f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.metrics; + +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; + +import java.util.HashMap; +import java.util.Map; + +/** + * This is base class for allocated and available metrics for + * custom resources. + */ +public class CustomResourceMetrics { + private static final String ALLOCATED_RESOURCE_METRIC_PREFIX = + "AllocatedResource."; + private static final String ALLOCATED_RESOURCE_METRIC_DESC = "Allocated NAME"; + + private static final String AVAILABLE_RESOURCE_METRIC_PREFIX = + "AvailableResource."; + private static final String AVAILABLE_RESOURCE_METRIC_DESC = "Available NAME"; + + private final CustomResourceMetricValue allocated = + new CustomResourceMetricValue(); + private final CustomResourceMetricValue available = + new CustomResourceMetricValue(); + + /** + * Register all custom resources metrics as part of initialization. + * @param customResources Map containing all custom resource types + * @param registry of the metric type + */ + public void registerCustomResources(Map customResources, + MetricsRegistry registry) { + registerCustomResources(customResources, registry, + ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); + registerCustomResources(customResources, registry, + AVAILABLE_RESOURCE_METRIC_PREFIX, AVAILABLE_RESOURCE_METRIC_DESC); + } + + /** + * Get a map of all custom resource metric. + * @return map of custom resource + */ + public Map initAndGetCustomResources() { + Map customResources = new HashMap(); + ResourceInformation[] resources = ResourceUtils.getResourceTypesArray(); + + for (int i = 2; i < resources.length; i++) { + ResourceInformation resource = resources[i]; + customResources.put(resource.getName(), Long.valueOf(0)); + } + return customResources; + } + + /** + * As and when this metric object construction happens for any queue, all + * custom resource metrics value would be initialized with '0' like any other + * mandatory resources metrics. 
+ * @param customResources Map containing all custom resource types + * @param registry of the metric type + * @param metricPrefix prefix in metric name + * @param metricDesc suffix for metric name + */ + public void registerCustomResources(Map customResources, + MetricsRegistry registry, String metricPrefix, String metricDesc) { + for (Map.Entry entry : customResources.entrySet()) { + String resourceName = entry.getKey(); + Long resourceValue = entry.getValue(); + + MutableGaugeLong resourceMetric = + (MutableGaugeLong) registry.get(metricPrefix + resourceName); + + if (resourceMetric == null) { + resourceMetric = registry.newGauge(metricPrefix + resourceName, + metricDesc.replace("NAME", resourceName), 0L); + } + resourceMetric.set(resourceValue); + } + } + + public void setAvailable(Resource res) { + available.set(res); + } + + public void increaseAllocated(Resource res) { + allocated.increase(res); + } + + public void increaseAllocated(Resource res, int containers) { + allocated.increaseWithMultiplier(res, containers); + } + + public void decreaseAllocated(Resource res) { + allocated.decrease(res); + } + + public void decreaseAllocated(Resource res, int containers) { + allocated.decreaseWithMultiplier(res, containers); + } + + public Map getAllocatedValues() { + return allocated.getValues(); + } + + public Map getAvailableValues() { + return available.getValues(); + } + + public CustomResourceMetricValue getAvailable() { + return available; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/package-info.java new file mode 100644 index 0000000000000..5df20b1bf88d1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Provides common metrics (available, allocated) for custom resources. 
+ */ +@InterfaceAudience.Private +package org.apache.hadoop.yarn.metrics; +import org.apache.hadoop.classification.InterfaceAudience; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index c706989e73d19..c19faaaad3fcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -58,8 +58,8 @@ import org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; @Private public class CommonNodeLabelsManager extends AbstractService { @@ -559,6 +559,50 @@ private void replaceNodeForLabels(NodeId node, Set oldLabels, addNodeToLabels(node, newLabels); } + private void addLabelsToNodeInHost(NodeId node, Set labels) + throws IOException { + Host host = nodeCollections.get(node.getHost()); + if (null == host) { + throw new IOException("Cannot add labels to a host that " + + "does not exist. Create the host before adding labels to it."); + } + Node nm = host.nms.get(node); + if (nm != null) { + Node newNm = nm.copy(); + if (newNm.labels == null) { + newNm.labels = + Collections.newSetFromMap(new ConcurrentHashMap()); + } + newNm.labels.addAll(labels); + host.nms.put(node, newNm); + } + } + + protected void removeLabelsFromNodeInHost(NodeId node, Set labels) + throws IOException { + Host host = nodeCollections.get(node.getHost()); + if (null == host) { + throw new IOException("Cannot remove labels from a host that " + + "does not exist. 
Create the host before adding labels to it."); + } + Node nm = host.nms.get(node); + if (nm != null) { + if (nm.labels == null) { + nm.labels = new HashSet(); + } else { + nm.labels.removeAll(labels); + } + } + } + + private void replaceLabelsForNode(NodeId node, Set oldLabels, + Set newLabels) throws IOException { + if(oldLabels != null) { + removeLabelsFromNodeInHost(node, oldLabels); + } + addLabelsToNodeInHost(node, newLabels); + } + @SuppressWarnings("unchecked") protected void internalUpdateLabelsOnNodes( Map> nodeToLabels, NodeLabelUpdateOperation op) @@ -597,10 +641,14 @@ protected void internalUpdateLabelsOnNodes( break; case REPLACE: replaceNodeForLabels(nodeId, host.labels, labels); + replaceLabelsForNode(nodeId, host.labels, labels); host.labels.clear(); host.labels.addAll(labels); for (Node node : host.nms.values()) { replaceNodeForLabels(node.nodeId, node.labels, labels); + if (node.labels != null) { + replaceLabelsForNode(node.nodeId, node.labels, labels); + } node.labels = null; } break; @@ -625,6 +673,7 @@ protected void internalUpdateLabelsOnNodes( case REPLACE: oldLabels = getLabelsByNode(nodeId); replaceNodeForLabels(nodeId, oldLabels, labels); + replaceLabelsForNode(nodeId, oldLabels, labels); if (nm.labels == null) { nm.labels = new HashSet(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java index c313998598bdd..02f2188bfc8e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.nodelabels; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeAttributeKey; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java index dca0555abc97c..f1c9a95ae976b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.nodelabels.store.op; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java index 2fc4ac3dfc2b5..5280afd41bee6 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.nodelabels.store.op; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos; import org.apache.hadoop.yarn.server.api.protocolrecords diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java index d6087792d4fc9..6d100f30ccd02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java @@ -28,7 +28,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.List; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java index da6a8c54aa480..0846af3085cac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java @@ -22,7 +22,7 @@ import java.io.DataOutputStream; import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java index 639017aa622a7..20729a3cc8a73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import 
org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -114,7 +114,7 @@ public class ResourceManagerAdministrationProtocolPBClientImpl implements Resour public ResourceManagerAdministrationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ResourceManagerAdministrationProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = (ResourceManagerAdministrationProtocolPB)RPC.getProxy( ResourceManagerAdministrationProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java index a1ead5b4176cd..fb7a750852a52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMAdminProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.server.api.SCMAdminProtocol; import org.apache.hadoop.yarn.server.api.SCMAdminProtocolPB; @@ -45,7 +45,7 @@ public class SCMAdminProtocolPBClientImpl implements SCMAdminProtocol, public SCMAdminProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, SCMAdminProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(SCMAdminProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java index f6634b4f68f2d..66fddad2d792a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java @@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProtoOrBuilder; import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class ReplaceLabelsOnNodeRequestPBImpl extends ReplaceLabelsOnNodeRequest { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java index 952ad77824c57..a53beed49da29 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java @@ -35,7 +35,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AdminACLsManager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private public class ApplicationACLsManager { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java index 74ee5fca95e8f..8e5d82e837533 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java @@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java index 917d69659875f..08f6fe17223c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.util; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java index e7369a9903988..e5fb417561179 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java @@ -32,6 +32,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; @@ -53,10 +54,10 @@ import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.util.concurrent.Futures; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.yarn.exceptions.YarnException; /** @@ -269,9 +270,12 @@ private void verifyAndCopy(Path destination) FileSystem sourceFs = sCopy.getFileSystem(conf); FileStatus sStat = sourceFs.getFileStatus(sCopy); if (sStat.getModificationTime() != resource.getTimestamp()) { - throw new IOException("Resource " + sCopy + - " changed on src filesystem (expected " + resource.getTimestamp() + - ", was " + sStat.getModificationTime()); + throw new IOException("Resource " + sCopy + " changed on src filesystem" + + " - expected: " + + "\"" + Times.formatISO8601(resource.getTimestamp()) + "\"" + + ", was: " + + "\"" + Times.formatISO8601(sStat.getModificationTime()) + "\"" + + ", current time: " + "\"" + Times.formatISO8601(Time.now()) + "\""); } if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) { if (!isPublic(sourceFs, sCopy, sStat, statCache)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java index 07ddbe953a965..0e4cc439043f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java @@ -22,7 +22,7 @@ import java.util.Collections; import java.util.List; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"}) public final class RackResolver { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java index 331be308e2efa..54ba886eaed01 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.util; -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; import java.util.ArrayList; import java.util.List; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index 1b21d2f45aeed..a7b59806a0b4d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.util.resource; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -166,4 +166,9 @@ public Set getInsufficientResourceNames(Resource required, return ImmutableSet.of(); } } + + @Override + public boolean isAllInvalidDivisor(Resource r) { + return isInvalidDivisor(r); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 0a6df09de9905..db62a895e6e54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -388,6 +388,7 @@ public boolean isInvalidDivisor(Resource r) { return false; } + @Override public boolean isAllInvalidDivisor(Resource r) { boolean flag = true; for (ResourceInformation res : r.getResources()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java index 91cbdd7e7e9a2..05850137c747e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java @@ -225,6 +225,15 @@ public abstract float divide( */ public abstract boolean isInvalidDivisor(Resource r); + + /** + * Determine if all resources are zero. + * + * @param r resource + * @return true if all divisors are invalid (should not be used), false else + */ + public abstract boolean isAllInvalidDivisor(Resource r); + /** * Ratio of resource a to resource b. 
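For reference, the isAllInvalidDivisor contract introduced above amounts to "every resource dimension is zero": DefaultResourceCalculator simply delegates to its existing memory-only isInvalidDivisor check, while DominantResourceCalculator walks all resource types. A minimal standalone sketch of that semantics, kept separate from the patched calculator classes (the class and method names below are invented for illustration):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceInformation;

public class AllInvalidDivisorSketch {
  // A Resource cannot be used as a divisor in any dimension only when every one
  // of its resource values is zero; that is the condition the new
  // ResourceCalculator method formalizes.
  static boolean allDimensionsZero(Resource r) {
    for (ResourceInformation info : r.getResources()) {
      if (info.getValue() != 0L) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(allDimensionsZero(Resource.newInstance(0, 0))); // true
    System.out.println(allDimensionsZero(Resource.newInstance(0, 4))); // false
  }
}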
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java index 63a9ba55856fe..6710ee782bee7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java @@ -24,7 +24,7 @@ import com.fasterxml.jackson.core.JsonGenerationException; import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java index 1b25b84889d75..ad80a2eefe5bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.servlet.RequestScoped; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java index f13a4e990e486..d0503876b879a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp; -import static com.google.common.base.Preconditions.checkState; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState; import java.io.IOException; import java.util.Timer; @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Singleton; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java index 94063ed222ba1..0ef5b221f8977 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp; -import 
com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.google.inject.servlet.RequestScoped; import java.util.Iterator; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java index b1c7834421309..5e7aa68b948c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.webapp; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState; import static org.apache.hadoop.yarn.util.StringHelper.djoin; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.util.StringHelper.pjoin; @@ -36,9 +36,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.CharMatcher; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.base.CharMatcher; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** * Manages path info to controller#action routing. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java index fad6fe29462ae..f6511b3c9c852 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp; -import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.net.InetSocketAddress; import java.util.ArrayList; @@ -34,8 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.google.inject.Provides; import com.google.inject.servlet.GuiceFilter; import com.google.inject.servlet.ServletModule; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 7aec0ddbcc2f8..2f02fd7a0762f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp; -import static com.google.common.base.Preconditions.checkNotNull; +import 
static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import java.io.IOException; import java.net.ConnectException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java index 8a2db8f98125d..83373ef8ca215 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp.hamlet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.io.IOException; import java.io.PrintWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java index b0ff19f20d771..375fb6045d6a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java @@ -18,10 +18,10 @@ package org.apache.hadoop.yarn.webapp.hamlet; -import com.google.common.base.Joiner; -import static com.google.common.base.Preconditions.*; -import com.google.common.base.Splitter; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import java.io.PrintWriter; import java.util.EnumSet; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java index c6ca93c597ce9..722dd7347338b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp.hamlet2; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.io.IOException; import java.io.PrintWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java index 1c4db0678eda6..c1f5195d6d7a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java @@ -18,10 +18,10 @@ package org.apache.hadoop.yarn.webapp.hamlet2; -import com.google.common.base.Joiner; -import static com.google.common.base.Preconditions.*; -import 
com.google.common.base.Splitter; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import java.io.PrintWriter; import java.util.EnumSet; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java index 323a17c61a581..176e28ac9305f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/DefaultPage.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.webapp.view; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import java.util.Enumeration; import java.util.Map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java index 4438da09e9f0b..0c9529280b790 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java @@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.HTML; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class JQueryUI extends HtmlBlock { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java index fe71395bf9388..f33474f7a0e1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.webapp.SubView; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * A simpler two column layout implementation with a header, a navigation bar diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 49b3bb293a37b..b248b5e42f942 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -327,11 +327,10 @@ - The maximum number of application attempts. It's a global - setting for all application masters. 
Each application master can specify - its individual maximum number of application attempts via the API, but the - individual number cannot be more than the global upper bound. If it is, - the resourcemanager will override it. The default number is set to 2, to + The default maximum number of application attempts, if unset by + the user. Each application master can specify its individual maximum number of application + attempts via the API, but the individual number cannot be more than the global upper bound in + yarn.resourcemanager.am.global.max-attempts. The default number is set to 2, to allow at least one retry for AM. yarn.resourcemanager.am.max-attempts 2 @@ -805,6 +804,16 @@ 12800 + + If true, ResourceManager will always try to cancel delegation + tokens after the application completes, even if the client sets + shouldCancelAtEnd false. References to delegation tokens are tracked, + so they will not be canceled until all sub-tasks are done using them. + + yarn.resourcemanager.delegation-token.always-cancel + false + + If true, ResourceManager will have proxy-user privileges. Use case: In a secure cluster, YARN requires the user hdfs delegation-tokens to @@ -845,6 +854,56 @@ 1000 + + Enables heart-beat interval scaling. The NodeManager + heart-beat interval will scale based on the difference between the CPU + utilization on the node and the cluster-wide average CPU utilization. + + + yarn.resourcemanager.nodemanagers.heartbeat-interval-scaling-enable + + false + + + + If heart-beat interval scaling is enabled, this is the + minimum heart-beat interval in milliseconds + + yarn.resourcemanager.nodemanagers.heartbeat-interval-min-ms + 1000 + + + + If heart-beat interval scaling is enabled, this is the + maximum heart-beat interval in milliseconds + yarn.resourcemanager.nodemanagers.heartbeat-interval-max-ms + 1000 + + + + If heart-beat interval scaling is enabled, this controls + the degree of adjustment when speeding up heartbeat intervals. + At 1.0, 20% less than average CPU utilization will result in a 20% + decrease in heartbeat interval. + + + yarn.resourcemanager.nodemanagers.heartbeat-interval-speedup-factor + + 1.0 + + + + If heart-beat interval scaling is enabled, this controls + the degree of adjustment when slowing down heartbeat intervals. + At 1.0, 20% greater than average CPU utilization will result in a 20% + increase in heartbeat interval. + + + yarn.resourcemanager.nodemanagers.heartbeat-interval-slowdown-factor + + 1.0 + + The minimum allowed version of a connecting nodemanager. The valid values are NONE (no version checking), EqualToRM (the nodemanager's version is equal to @@ -1123,6 +1182,17 @@ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX + + + * PATH components that will be prepended to the user's path. + * If this is defined and the user does not define PATH, NM will also + * append ":$PATH" to prevent this from eclipsing the PATH defined in + * the container. This feature is only available for Linux. + + yarn.nodemanager.force.path + + + Environment variables that containers may override rather than use NodeManager's default. yarn.nodemanager.env-whitelist @@ -1650,6 +1720,34 @@ yarn.nodemanager.container-monitor.interval-ms + + Flag to enable the container log monitor which enforces + container log directory size limits. 
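The heartbeat-interval scaling properties above describe the intended behaviour only in prose; the arithmetic they imply is a proportional adjustment around the base interval, scaled by the speedup or slowdown factor and clamped to the configured minimum and maximum. A rough illustrative sketch of that rule (not the ResourceManager implementation; every name below is invented):

public final class HeartbeatScalingSketch {
  // nodeCpu and clusterAvgCpu are utilizations in [0, 1]; the factors map to the
  // speedup/slowdown properties; the result is clamped to [minMs, maxMs].
  static long scaledIntervalMs(long baseMs, long minMs, long maxMs,
      float speedupFactor, float slowdownFactor,
      float nodeCpu, float clusterAvgCpu) {
    float delta = nodeCpu - clusterAvgCpu;
    float factor = (delta < 0) ? speedupFactor : slowdownFactor;
    long interval = (long) (baseMs * (1.0f + factor * delta));
    return Math.min(maxMs, Math.max(minMs, interval));
  }

  public static void main(String[] args) {
    // 20% below the cluster average with a speedup factor of 1.0 gives a 20%
    // shorter interval, matching the property descriptions above.
    System.out.println(scaledIntervalMs(1000, 500, 2000, 1.0f, 1.0f, 0.40f, 0.60f)); // 800
  }
}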
+ yarn.nodemanager.container-log-monitor.enable + false + + + + How often to check the usage of a container's log directories + in milliseconds + yarn.nodemanager.container-log-monitor.interval-ms + 60000 + + + + The disk space limit, in bytes, for a single + container log directory + yarn.nodemanager.container-log-monitor.dir-size-limit-bytes + 1000000000 + + + + The disk space limit, in bytes, for all of a container's + logs + yarn.nodemanager.container-log-monitor.total-size-limit-bytes + 10000000000 + + Class that calculates containers current resource utilization. If not set, the value for yarn.nodemanager.resource-calculator.class will @@ -1669,6 +1767,13 @@ 1200000 + + Whether or not to run the node health script + before the NM starts up. + yarn.nodemanager.health-checker.run-before-startup + false + + Frequency of running node health scripts. yarn.nodemanager.health-checker.interval-ms @@ -1691,12 +1796,27 @@ 0.25 + + Enable/Disable the disk utilisation percentage + threshold for disk health checker. + yarn.nodemanager.disk-health-checker.disk-utilization-threshold.enabled + true + + + + Enable/Disable the minimum disk free + space threshold for disk health checker. + yarn.nodemanager.disk-health-checker.disk-free-space-threshold.enabled + true + + The maximum percentage of disk space utilization allowed after which a disk is marked as bad. Values can range from 0.0 to 100.0. If the value is greater than or equal to 100, the nodemanager will check for full disk. This applies to yarn.nodemanager.local-dirs and - yarn.nodemanager.log-dirs. + yarn.nodemanager.log-dirs when + yarn.nodemanager.disk-health-checker.disk-utilization-threshold.enabled is true. yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage 90.0 @@ -1716,7 +1836,8 @@ The minimum space in megabytes that must be available on a disk for it to be used. If space on a disk falls below this threshold, it will be marked as bad. This applies to yarn.nodemanager.local-dirs and - yarn.nodemanager.log-dirs. + yarn.nodemanager.log-dirs when + yarn.nodemanager.disk-health-checker.disk-free-space-threshold.enabled is true. yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb 0 @@ -3324,6 +3445,42 @@ 20 + + + Used to specify custom web services for Resourcemanager. Value can be + classnames separated by comma. + Ex: org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices, + org.apache.hadoop.yarn.server.resourcemanager.webapp.DummyClass + + yarn.http.rmwebapp.external.classes + + + + + + Used to specify custom scheduler page + + yarn.http.rmwebapp.scheduler.page.class + + + + + + Used to specify custom DAO classes used by custom web services. + + yarn.http.rmwebapp.custom.dao.classes + + + + + + Used to specify custom DAO classes used by custom web services which requires + root unwrapping. + + yarn.http.rmwebapp.custom.unwrapped.dao.classes + + + The Node Label script to run. Script output Line starting with "NODE_PARTITION:" will be considered as Node Label Partition. In case of @@ -4534,4 +4691,31 @@ yarn.webapp.enable-rest-app-submissions true + + + + The maximum number of application attempts. It's a global + setting for all application masters. Each application master can specify + its individual maximum number of application attempts via the API, but the + individual number cannot be more than the global upper bound. If it is, + the resourcemanager will override it. The default number value is set to + yarn.resourcemanager.am.max-attempts. 
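Read together with the property named just below (yarn.resourcemanager.am.global.max-attempts), the description above means an application's own attempt count is honoured only up to the global bound, which in turn defaults to yarn.resourcemanager.am.max-attempts. A hedged sketch of that capping rule using the plain configuration API (the requested value is a made-up example, not ResourceManager code):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MaxAttemptsSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Default used when an application does not set its own attempt count.
    int defaultAttempts = conf.getInt("yarn.resourcemanager.am.max-attempts", 2);
    // Hard upper bound; per the description it falls back to the default above.
    int globalMax = conf.getInt("yarn.resourcemanager.am.global.max-attempts",
        defaultAttempts);
    int requestedByApp = 5; // hypothetical value from ApplicationSubmissionContext
    int effective = Math.min(requestedByApp, globalMax);
    System.out.println("effective AM attempts = " + effective);
  }
}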
+ + yarn.resourcemanager.am.global.max-attempts + + + + + Max number of application tags set by user in ApplicationSubmissionContext + while submitting application + yarn.resourcemanager.application.max-tags + 10 + + + + Max length of each application tag set by user in ApplicationSubmissionContext + while submitting application. + yarn.resourcemanager.application.max-tag.length + 100 + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java index 401cfec2e99d6..f59f00e6e853f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java @@ -23,7 +23,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; -import com.google.common.collect.Iterators; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; /** * Utilities to generate fake test apps diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index 25eb9e1454a14..affa08f082437 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.yarn.api; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.commons.lang3.Range; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index cdb6c4664fb60..980ec0da9f1c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -375,7 +375,7 @@ import org.junit.Ignore; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; /** * Test class for YARN API protocol records. 
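The yarn-default.xml hunk above also introduces limits on application tags: by default at most 10 tags per application, each at most 100 characters, via yarn.resourcemanager.application.max-tags and yarn.resourcemanager.application.max-tag.length. A simple client-side pre-check under those assumed defaults could look as follows; this is only a sketch, not the ResourceManager's actual validation:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class AppTagLimitsSketch {
  // Defaults mirror the new yarn-default.xml entries; a real client would read
  // the configured values instead of hard-coding them.
  static final int MAX_TAGS = 10;
  static final int MAX_TAG_LENGTH = 100;

  static void checkTags(Set<String> tags) {
    if (tags.size() > MAX_TAGS) {
      throw new IllegalArgumentException("Too many application tags: " + tags.size());
    }
    for (String tag : tags) {
      if (tag.length() > MAX_TAG_LENGTH) {
        throw new IllegalArgumentException("Application tag too long: " + tag);
      }
    }
  }

  public static void main(String[] args) {
    checkTags(new HashSet<>(Arrays.asList("etl", "nightly"))); // passes the default limits
  }
}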
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java index 762e2280ca33b..55ddd12fce98e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java @@ -97,12 +97,23 @@ private enum DummyType { } private static class TestHandler implements EventHandler { + + private long sleepTime = 1500; + + TestHandler() { + } + + TestHandler(long sleepTime) { + this.sleepTime = sleepTime; + } + @Override public void handle(Event event) { try { // As long as 10000 events queued - Thread.sleep(1500); - } catch (InterruptedException e) {} + Thread.sleep(this.sleepTime); + } catch (InterruptedException e) { + } } } @@ -170,11 +181,54 @@ public void testPrintDispatcherEventDetails() throws Exception { //Make sure more than one event to take verify(log, atLeastOnce()). info("Latest dispatch event type: TestEventType"); + } finally { + //... restore logger object + logger.set(null, oldLog); dispatcher.stop(); + } + } + + //Test print dispatcher details when the blocking queue is heavy + @Test(timeout = 60000) + public void testPrintDispatcherEventDetailsAvoidDeadLoop() throws Exception { + for (int i = 0; i < 5; i++) { + testPrintDispatcherEventDetailsAvoidDeadLoopInternal(); + } + } + + public void testPrintDispatcherEventDetailsAvoidDeadLoopInternal() + throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.setInt(YarnConfiguration. + YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD, 10); + Logger log = mock(Logger.class); + AsyncDispatcher dispatcher = new AsyncDispatcher(); + dispatcher.init(conf); + + Field logger = AsyncDispatcher.class.getDeclaredField("LOG"); + logger.setAccessible(true); + Field modifiers = Field.class.getDeclaredField("modifiers"); + modifiers.setAccessible(true); + modifiers.setInt(logger, logger.getModifiers() & ~Modifier.FINAL); + Object oldLog = logger.get(null); + + try { + logger.set(null, log); + dispatcher.register(TestEnum.class, new TestHandler(0)); + dispatcher.start(); + + for (int i = 0; i < 10000; ++i) { + Event event = mock(Event.class); + when(event.getType()).thenReturn(TestEnum.TestEventType); + dispatcher.getEventHandler().handle(event); + } + Thread.sleep(3000); } finally { //... 
restore logger object logger.set(null, oldLog); + dispatcher.stop(); } } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index 9ae2983ca0fbb..bf20fb7429275 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -33,6 +33,10 @@ import java.io.StringWriter; import java.io.UnsupportedEncodingException; import java.io.Writer; +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.Collections; import java.util.concurrent.CountDownLatch; @@ -121,6 +125,20 @@ public void testForCorruptedAggregatedLogs() throws Exception { Assert.fail("Aggregated logs are corrupted."); } } + + //Append some corrupted text to the end of the aggregated file. + URI logUri = URI.create("file:///" + remoteAppLogFile.toUri().toString()); + Files.write(Paths.get(logUri), + "corrupt_text".getBytes(), StandardOpenOption.APPEND); + try { + // Trying to read a corrupted log file created above should cause + // log reading to fail below with an IOException. + logReader = new LogReader(conf, remoteAppLogFile); + Assert.fail("Expect IOException from reading corrupt aggregated logs."); + } catch (IOException ioe) { + DataInputStream dIS = logReader.next(rLogKey); + Assert.assertNull("Input stream not available for reading", dIS); + } } private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java index 28b9497fa36ce..9cb2b95e95202 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java @@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.api.records.NodeLabel; import org.junit.Assert; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class NodeLabelTestBase { public static void assertMapEquals(Map> expected, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java index a9894ff75493d..d9f9389866e43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java @@ -37,9 +37,9 @@ import org.junit.Before; import org.junit.Test; -import 
com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestCommonNodeLabelsManager extends NodeLabelTestBase { DummyCommonNodeLabelsManager mgr = null; @@ -616,4 +616,22 @@ public void testGetNodeLabelsInfo() throws IOException { toNodeId("n1"), toSet(NodeLabel.newInstance("p2", true)), toNodeId("n2"), toSet(NodeLabel.newInstance("p3", false)))); } + + @Test(timeout = 5000) + public void testRemoveNodeLabelsInfo() throws IOException { + mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", true))); + mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p2", true))); + mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p1"))); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2"))); + + Map> labelsToNodes = mgr.getLabelsToNodes(); + assertLabelsToNodesEquals( + labelsToNodes, + ImmutableMap.of( + "p2", toSet(toNodeId("n1:1"), toNodeId("n1:0")))); + + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), new HashSet())); + Map> labelsToNodes2 = mgr.getLabelsToNodes(); + Assert.assertEquals(labelsToNodes2.get("p2"), null); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java index 36dbc2b9bc8ad..f0885cd50f2c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java @@ -38,7 +38,7 @@ import org.junit.runners.Parameterized; import org.mockito.Mockito; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; @RunWith(Parameterized.class) public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java index 060e38d8884e3..73e0bda91f6eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java @@ -19,7 +19,7 @@ import static org.junit.Assert.fail; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeAttributeType; import org.junit.Assert; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java index 198b395ab84a2..f035da831d9b1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/resourcetypes/ResourceTypesTestHelper.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.resourcetypes; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.factories.RecordFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java index 678687fa58270..59b779c071df4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java @@ -78,9 +78,9 @@ import org.junit.AfterClass; import org.junit.Test; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; /** * Unit test for the FSDownload class. @@ -711,4 +711,78 @@ public void testUniqueDestinationPath() throws Exception { // destination directory (passed as an argument) + file name. Assert.assertEquals(destPath, rPath.get().getParent()); } + + /** + * This test method is responsible for creating an IOException resulting + * from modification to the local resource's timestamp on the source FS just + * before the download of this local resource has started. + */ + @Test(timeout=10000) + public void testResourceTimestampChangeDuringDownload() + throws IOException, InterruptedException { + conf = new Configuration(); + FileContext files = FileContext.getLocalFSFileContext(conf); + final Path basedir = files.makeQualified( + new Path("target", TestFSDownload.class.getSimpleName())); + files.mkdir(basedir, null, true); + conf.setStrings(TestFSDownload.class.getName(), basedir.toString()); + + LocalDirAllocator dirs = + new LocalDirAllocator(TestFSDownload.class.getName()); + + Path path = new Path(basedir, "test-file"); + Random rand = new Random(); + long sharedSeed = rand.nextLong(); + rand.setSeed(sharedSeed); + int size = 512; + LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC; + LocalResource localResource = createFile(files, path, size, rand, vis); + + Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf); + destPath = new Path(destPath, + Long.toString(uniqueNumberGenerator.incrementAndGet())); + + FSDownload fsDownload = new FSDownload(files, + UserGroupInformation.getCurrentUser(), conf, destPath, localResource); + + // Store the original local resource timestamp used to set up the + // FSDownload object just before (but before the download starts) + // for comparison purposes later on. + long origLRTimestamp = localResource.getTimestamp(); + + // Modify the local resource's timestamp to yesterday on the Filesystem + // just before FSDownload starts. 
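Setting the source file's modification time one day behind the LocalResource timestamp guarantees the mismatch that FSDownload#verifyAndCopy (changed earlier in this patch) now reports with ISO-8601 timestamps, which is what the assertions at the end of this test check. A tiny illustration of what such a rendering looks like, using java.time purely for demonstration (the patch itself calls Times.formatISO8601):

import java.time.Instant;

public class TimestampMessageSketch {
  public static void main(String[] args) {
    long expected = System.currentTimeMillis();
    long onSourceFs = expected - 86_400_000L; // one day earlier, as in the test
    // ISO-8601 makes the expected/actual pair readable at a glance compared to
    // raw epoch millis, e.g. 2021-03-01T10:15:30.123Z.
    System.out.println("expected: \"" + Instant.ofEpochMilli(expected) + "\""
        + ", was: \"" + Instant.ofEpochMilli(onSourceFs) + "\"");
  }
}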
+ final long msInADay = 86400 * 1000; + long modifiedFSTimestamp = origLRTimestamp - msInADay; + try { + Path sourceFsPath = localResource.getResource().toPath(); + FileSystem sourceFs = sourceFsPath.getFileSystem(conf); + sourceFs.setTimes(sourceFsPath, modifiedFSTimestamp, modifiedFSTimestamp); + } catch (URISyntaxException use) { + Assert.fail("No exception expected."); + } + + // Execute the FSDownload operation. + Map> pending = new HashMap<>(); + ExecutorService exec = HadoopExecutors.newSingleThreadExecutor(); + pending.put(localResource, exec.submit(fsDownload)); + + exec.shutdown(); + + exec.awaitTermination(1000, TimeUnit.MILLISECONDS); + Assert.assertTrue(pending.get(localResource).isDone()); + + try { + for (Map.Entry> p : pending.entrySet()) { + p.getValue().get(); + } + Assert.fail("Exception expected from timestamp update during download"); + } catch (ExecutionException ee) { + Assert.assertTrue(ee.getCause() instanceof IOException); + Assert.assertTrue("Exception contains original timestamp", + ee.getMessage().contains(Times.formatISO8601(origLRTimestamp))); + Assert.assertTrue("Exception contains modified timestamp", + ee.getMessage().contains(Times.formatISO8601(modifiedFSTimestamp))); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java index 45c01e0c96f0b..1f52f40cb504a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/CustomResourceTypesConfigurationProvider.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.util.resource; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.LocalConfigurationProvider; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java index f1c8c89800576..39f33990270f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java @@ -21,7 +21,7 @@ import java.util.Arrays; import java.util.Collection; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java index 6d9e70fa32311..b4832eff42002 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java @@ -94,6 +94,24 @@ public static String getXmlString(Element element, String name) { return val; } + public static String getPropertyValue(Element element, String elementName, + String propertyName) { + NodeList id = element.getElementsByTagName(elementName); + Element line = (Element) id.item(0); + if (line == null) { + return null; + } + NodeList properties = line.getChildNodes(); + for (int i = 0; i < properties.getLength(); i++) { + Element property = (Element) properties.item(i); + if (getXmlString(property, "name").equals(propertyName)) { + return getXmlString(property, "value"); + } + } + return null; + } + + public static String getXmlAttrString(Element element, String name) { Attr at = element.getAttributeNode(name); if (at != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml index 9324353e2e6d6..3cbb9514093e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml @@ -18,7 +18,7 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-csi @@ -27,16 +27,14 @@ 3.6.1 - 20.0 1.26.0 1.5.0.Final - com.google.guava - guava - ${guava.version} + org.apache.hadoop.thirdparty + hadoop-shaded-guava com.google.protobuf @@ -66,6 +64,18 @@ io.grpc grpc-netty ${grpc.version} + + + + io.netty + netty-codec-http2 + + + + io.netty + netty-handler-proxy + + junit diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java index 2375b06bc4240..206fb1433a1d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.yarn.csi.adaptor; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import csi.v0.Csi; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; @@ -26,6 +24,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.CsiAdaptorProtocol; import org.apache.hadoop.yarn.api.CsiAdaptorPlugin; import org.apache.hadoop.yarn.api.impl.pb.client.CsiAdaptorProtocolPBClientImpl; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestValidateVolumeCapabilityRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestValidateVolumeCapabilityRequest.java index 303cfc4493566..127ddb3383d91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestValidateVolumeCapabilityRequest.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestValidateVolumeCapabilityRequest.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.csi.adaptor; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest; import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest.VolumeCapability; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ValidateVolumeCapabilitiesRequestPBImpl; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java index 9e7ac1952be5c..f025bc6d72fda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java @@ -20,7 +20,7 @@ import csi.v0.Csi; import org.apache.commons.io.FileUtils; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Assume; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml index 9618a062e37ab..88d31476c297a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-registry - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Registry diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml index 81b68238a838b..154a8cf0aea59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml @@ -22,11 +22,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-applicationhistoryservice - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN ApplicationHistoryService @@ -133,8 +133,8 @@ jersey-client - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java index ecaf0fa89a847..9502af469d1bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java @@ -58,7 +58,7 @@ import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java index 385b8062ff3d6..55fc6518744a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java @@ -39,7 +39,7 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.ContainerHistoryData; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java index 8afcd6aa454c7..1ea064039f014 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java @@ -67,7 +67,7 @@ import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java index 4e3a1e603a38c..5f1d7c29de210 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -56,7 +56,7 @@ import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.webapp.WebAppContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java index 18420f332c401..4cc22deb0d114 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java @@ -33,7 +33,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -210,7 +210,7 @@ public ContainerInfo getContainer(@Context HttpServletRequest req, * The container ID * @param nmId * The Node Manager NodeId - * @param redirected_from_node + * @param redirectedFromNode * Whether this is a redirected request from NM * @return * The log file's name and current file size @@ -224,14 +224,16 @@ public Response getContainerLogsInfo( @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) - @DefaultValue("false") boolean redirected_from_node) { + @DefaultValue("false") boolean redirectedFromNode, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { initForReadableEndpoints(res); WrappedLogMetaRequest.Builder logMetaRequestBuilder = LogServlet.createRequestFromContainerId(containerIdStr); return logServlet.getContainerLogsInfo(req, logMetaRequestBuilder, nmId, - redirected_from_node, null); + redirectedFromNode, null, manualRedirection); } /** @@ -251,7 +253,7 @@ public Response getContainerLogsInfo( * the size of the log file * @param nmId * The Node Manager NodeId - * @param redirected_from_node + * @param redirectedFromNode * Whether this is the redirect request from NM * @return * The contents of the container's log file @@ -269,9 +271,11 @@ public Response getContainerLogFile(@Context HttpServletRequest req, @QueryParam(YarnWebServiceParams.RESPONSE_CONTENT_SIZE) String size, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, 
@QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) - boolean redirected_from_node) { + boolean redirectedFromNode, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { return getLogs(req, res, containerIdStr, filename, format, - size, nmId, redirected_from_node); + size, nmId, redirectedFromNode, manualRedirection); } //TODO: YARN-4993: Refactory ContainersLogsBlock, AggregatedLogsBlock and @@ -290,10 +294,12 @@ public Response getLogs(@Context HttpServletRequest req, @QueryParam(YarnWebServiceParams.RESPONSE_CONTENT_SIZE) String size, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) - @DefaultValue("false") boolean redirected_from_node) { + @DefaultValue("false") boolean redirectedFromNode, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { initForReadableEndpoints(res); return logServlet.getLogFile(req, containerIdStr, filename, format, size, - nmId, redirected_from_node, null); + nmId, redirectedFromNode, null, manualRedirection); } @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java index 5d1a81ac79eb4..7d34a048ab0bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.timeline; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.collections.map.LRUMap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index e85505f73e877..1d6fa369d5283 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.timeline; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.io.File; import 
java.io.IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java index c5381967e13ea..bf40d41c8ff76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java @@ -42,7 +42,7 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager; import org.apache.hadoop.yarn.webapp.BadRequestException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java index bcd57ef1d020f..1250dcc18d730 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java @@ -27,7 +27,7 @@ import java.io.File; import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java index 47f075ff93e6a..a7c5f924b22fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java @@ -38,7 +38,7 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore; import org.apache.hadoop.yarn.util.StringHelper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index 51cbb8ba74129..381e53d54094f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-common - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Server Common @@ -72,8 +72,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava @@ -141,16 +141,6 @@ curator-test test - - com.sun.jersey.jersey-test-framework - jersey-test-framework-core - - - com.sun.jersey.jersey-test-framework - jersey-test-framework-core - 1.19 - test - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java index 7e8addd99b9bc..5ec1f1ab15a76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java @@ -32,8 +32,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Extends Thread and provides an implementation that is used for processing the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java index ac43b122f4c06..c41b53da43f78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java @@ -56,7 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A component that sits in between AMRMClient(Impl) and Yarn RM. 
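The pom.xml hunk above swaps the direct com.google.guava:guava dependency for org.apache.hadoop.thirdparty:hadoop-shaded-guava, and the source hunks throughout this patch rewrite every com.google.common import to the relocated org.apache.hadoop.thirdparty.com.google.common prefix. A minimal sketch of what consuming code looks like after the rewrite (the class below is illustrative, not part of the patch); only the package prefix changes, the Guava API itself is identical:

    // Relocated (shaded) Guava: same classes and signatures, new package prefix.
    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

    public class ShadedGuavaUsage {           // illustrative only
      private final String nodeId;

      public ShadedGuavaUsage(String nodeId) {
        // Behaves exactly like com.google.common.base.Preconditions.
        this.nodeId = Preconditions.checkNotNull(nodeId, "nodeId must not be null");
      }

      @VisibleForTesting
      String nodeId() {
        return nodeId;
      }
    }

Because the relocation touches only the package name, the hunks in this patch are pure import swaps with no behavioral change.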
It remembers diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java index ee9956ffaddb7..9f1870188ece6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java @@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.client.RMProxy; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java index af75038096c77..6d2bb5ddaf1f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/CollectorNodemanagerProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -63,7 +63,7 @@ public class CollectorNodemanagerProtocolPBClientImpl implements public CollectorNodemanagerProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, CollectorNodemanagerProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); int expireIntvl = conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java index 4bd803f755565..f2527fc13a21c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/DistributedSchedulingAMProtocolPBClientImpl.java @@ -20,7 +20,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import 
org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos; import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol; @@ -63,7 +63,7 @@ public class DistributedSchedulingAMProtocolPBClientImpl implements public DistributedSchedulingAMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, DistributedSchedulingAMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(DistributedSchedulingAMProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java index 650df85a01e92..76622e3a1440e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -52,7 +52,8 @@ public class ResourceTrackerPBClientImpl implements ResourceTracker, Closeable { private ResourceTrackerPB proxy; public ResourceTrackerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ResourceTrackerPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, ResourceTrackerPB.class, + ProtobufRpcEngine2.class); proxy = (ResourceTrackerPB)RPC.getProxy( ResourceTrackerPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java index 32f0bce4eb40f..d484ac1ab3d11 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/SCMUploaderProtocolPBClientImpl.java @@ -23,7 +23,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -50,7 +50,7 @@ public class SCMUploaderProtocolPBClientImpl implements public 
SCMUploaderProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, SCMUploaderProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); proxy = RPC.getProxy(SCMUploaderProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java index acec16fd56b21..54b39155c63ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.util.Records; public abstract class RegisterNodeManagerRequest { @@ -53,14 +54,15 @@ public static RegisterNodeManagerRequest newInstance(NodeId nodeId, Resource physicalResource) { return newInstance(nodeId, httpPort, resource, nodeManagerVersionId, containerStatuses, runningApplications, nodeLabels, physicalResource, - null); + null, null); } public static RegisterNodeManagerRequest newInstance(NodeId nodeId, int httpPort, Resource resource, String nodeManagerVersionId, List containerStatuses, List runningApplications, Set nodeLabels, - Resource physicalResource, Set nodeAttributes) { + Resource physicalResource, Set nodeAttributes, + NodeStatus nodeStatus) { RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class); request.setHttpPort(httpPort); @@ -72,6 +74,7 @@ public static RegisterNodeManagerRequest newInstance(NodeId nodeId, request.setNodeLabels(nodeLabels); request.setPhysicalResource(physicalResource); request.setNodeAttributes(nodeAttributes); + request.setNodeStatus(nodeStatus); return request; } @@ -133,4 +136,16 @@ public abstract void setLogAggregationReportsForApps( public abstract Set getNodeAttributes(); public abstract void setNodeAttributes(Set nodeAttributes); + + /** + * Get the status of the node. + * @return The status of the node. + */ + public abstract NodeStatus getNodeStatus(); + + /** + * Set the status of the node. + * @param nodeStatus The status of the node. 
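The PB client constructors above (CollectorNodemanagerProtocolPBClientImpl, DistributedSchedulingAMProtocolPBClientImpl, ResourceTrackerPBClientImpl, SCMUploaderProtocolPBClientImpl) all make the same change: the protocol engine registered before the proxy is created moves from ProtobufRpcEngine to ProtobufRpcEngine2. A condensed sketch of the pattern, using a stand-in protocol interface rather than a real one:

    import java.io.IOException;
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine2;
    import org.apache.hadoop.ipc.RPC;

    public class SomeProtocolPBClientImpl {
      /** Stand-in for a real PB protocol interface such as ResourceTrackerPB. */
      public interface SomeProtocolPB { }

      private final SomeProtocolPB proxy;

      public SomeProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
          Configuration conf) throws IOException {
        // Register the new engine for the protocol, then build the proxy as before.
        RPC.setProtocolEngine(conf, SomeProtocolPB.class, ProtobufRpcEngine2.class);
        proxy = RPC.getProxy(SomeProtocolPB.class, clientVersion, addr, conf);
      }
    }

At runtime the interface would of course have to be a real protobuf service binding; the sketch only shows the engine swap, which is the entirety of the change in these hunks.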
+ */ + public abstract void setNodeStatus(NodeStatus nodeStatus); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java index 317f8abd6f113..d91cff2531a5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto; @@ -51,7 +52,9 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; - +import org.apache.hadoop.yarn.server.api.records.NodeStatus; +import org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl; + public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest { RegisterNodeManagerRequestProto proto = RegisterNodeManagerRequestProto.getDefaultInstance(); RegisterNodeManagerRequestProto.Builder builder = null; @@ -68,6 +71,7 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest /** Physical resources in the node. */ private Resource physicalResource = null; + private NodeStatus nodeStatus; public RegisterNodeManagerRequestPBImpl() { builder = RegisterNodeManagerRequestProto.newBuilder(); @@ -121,6 +125,9 @@ private synchronized void mergeLocalToBuilder() { if (this.logAggregationReportsForApps != null) { addLogAggregationStatusForAppsToProto(); } + if (this.nodeStatus != null) { + builder.setNodeStatus(convertToProtoFormat(this.nodeStatus)); + } } private void addLogAggregationStatusForAppsToProto() { @@ -359,6 +366,28 @@ public synchronized void setPhysicalResource(Resource pPhysicalResource) { this.physicalResource = pPhysicalResource; } + @Override + public synchronized NodeStatus getNodeStatus() { + RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.nodeStatus != null) { + return this.nodeStatus; + } + if (!p.hasNodeStatus()) { + return null; + } + this.nodeStatus = convertFromProtoFormat(p.getNodeStatus()); + return this.nodeStatus; + } + + @Override + public synchronized void setNodeStatus(NodeStatus pNodeStatus) { + maybeInitBuilder(); + if (pNodeStatus == null) { + builder.clearNodeStatus(); + } + this.nodeStatus = pNodeStatus; + } + @Override public int hashCode() { return getProto().hashCode(); @@ -533,4 +562,12 @@ public synchronized void setLogAggregationReportsForApps( } this.logAggregationReportsForApps = logAggregationStatusForApps; } + + private NodeStatusPBImpl convertFromProtoFormat(NodeStatusProto s) { + return new NodeStatusPBImpl(s); + } + + private NodeStatusProto convertToProtoFormat(NodeStatus s) { + return ((NodeStatusPBImpl)s).getProto(); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java index cf6d1ef5bd04d..04dae3d42e9e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java @@ -42,8 +42,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * A FailoverProxyProvider implementation that uses the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java index 3aeeca3c73841..5a0cc95d1ef03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Utility class for Federation policy. 
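With the abstract getNodeStatus/setNodeStatus accessors and the PB conversion above in place, a NodeManager can ship its current NodeStatus inside the registration request itself. A hedged sketch of how a caller might populate it (the helper class and the hard-coded HTTP port are assumptions for illustration, not code from this patch):

    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
    import org.apache.hadoop.yarn.server.api.records.NodeStatus;
    import org.apache.hadoop.yarn.util.Records;

    public final class RegistrationWithNodeStatus {
      private RegistrationWithNodeStatus() { }

      static RegisterNodeManagerRequest build(NodeId nodeId, Resource capability,
          NodeStatus currentStatus) {
        RegisterNodeManagerRequest request =
            Records.newRecord(RegisterNodeManagerRequest.class);
        request.setNodeId(nodeId);
        request.setHttpPort(8042);            // assumed NM web port, illustrative only
        request.setResource(capability);
        // New in this patch: the node's status travels with the registration,
        // so the RM sees health and container state at register time.
        request.setNodeStatus(currentStatus);
        return request;
      }
    }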
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java index bbf08e096144b..6bde4a312b4ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class provides a facade to the policy subsystem, and handles the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java index 9c9f76241b1ea..f4df9216abade 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java @@ -50,8 +50,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * An implementation of the {@link FederationAMRMProxyPolicy} interface that diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java index 8139e1202dc4a..49657ba53abc1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java @@ -25,7 +25,7 @@ import org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Policy that allows operator to configure "weights" for routing. 
This picks a diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java index a14450117b918..2d494b95fffaa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java @@ -25,7 +25,7 @@ import org.apache.hadoop.yarn.server.federation.policies.router.LocalityRouterPolicy; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Policy that allows operator to configure "weights" for routing. This picks a diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java index a1f7666a9ff7b..b81ca07b42ad8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java @@ -24,6 +24,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; @@ -65,6 +66,10 @@ public SubClusterId getHomeSubcluster( chosen = id; } } + if (chosen == null) { + throw new FederationPolicyException( + "No Active Subcluster with weight vector greater than zero"); + } return chosen; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java index 27b46cde8e58f..0dda0c5258fe1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Performance metrics for FederationStateStore implementations. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java index 4d59bbcb833ea..04feb4a7f2ebc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.TextFormat; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java index 13545c9d2efbb..b66bcef59d1f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationRegistryClient.java @@ -40,7 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Helper class that handles reads and writes to Yarn Registry to support UAM HA diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java index 60b794f36d156..a067a04196245 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java @@ -69,7 +69,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AMRMClientRelayerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AMRMClientRelayerMetrics.java index 6ce58519fa749..6f6a856876f14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AMRMClientRelayerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AMRMClientRelayerMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.metrics; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsInfo; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/OpportunisticSchedulerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/OpportunisticSchedulerMetrics.java index 585f8f1c0effc..de5700fd5b6f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/OpportunisticSchedulerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/OpportunisticSchedulerMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.metrics; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java index 1f82b9d9d465f..61a196ba939b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.scheduler; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java index 96c3cdf420db2..7bb811fbbdb5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.timeline.security; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; import org.apache.hadoop.http.FilterInitializer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java index 7072030bd84e5..fb0487f25dc16 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java @@ -55,7 +55,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A service that manages a pool of UAM managers in diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java index 47d783094667b..b471d6592b13b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java @@ -68,8 +68,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * UnmanagedApplicationManager is used to register unmanaged application and diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 
b9f35a52a06af..72f97a298e14b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -72,7 +72,7 @@ import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.api.ContainerType; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeCapabilityRange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeCapabilityRange.java index e4775fe92e290..8009d9f85ed33 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeCapabilityRange.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeCapabilityRange.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.volume.csi; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.yarn.server.volume.csi.exception.InvalidVolumeException; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeMetaData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeMetaData.java index 7f2c92ca81877..bd26807345ccf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeMetaData.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/volume/csi/VolumeMetaData.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.volume.csi; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import com.google.gson.JsonObject; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.volume.csi.exception.InvalidVolumeException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java index b89ca02e09dca..40bd7024cf3a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java @@ -136,8 +136,7 @@ public void render(Block html) { try { fetchData(); - } - catch( Exception e) { + } catch (YarnException | IOException | InterruptedException e) { String message = "Failed to read the 
applications."; LOG.error(message, e); html.p().__(message).__(); @@ -150,7 +149,8 @@ protected void renderData(Block html) { TBODY> tbody = html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User") .th(".name", "Name").th(".type", "Application Type") - .th(".queue", "Queue").th(".priority", "Application Priority") + .th(".apptag", "Application Tags").th(".queue", "Queue") + .th(".priority", "Application Priority") .th(".starttime", "StartTime") .th(".launchtime", "LaunchTime") .th(".finishtime", "FinishTime") @@ -185,6 +185,10 @@ protected void renderData(Block html) { .append( StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app .getType()))) + .append("\",\"") + .append( + StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4( + app.getApplicationTags() == null ? "" : app.getApplicationTags()))) .append("\",\"") .append( StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java index 2bf643f799632..88475ca19fde2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.webapp; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.inject.Inject; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogServlet.java index d889344d8a109..dfeeefe5712d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogServlet.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.webapp; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.UniformInterfaceException; import org.apache.hadoop.conf.Configuration; @@ -44,7 +44,10 @@ import javax.ws.rs.core.GenericEntity; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; +import javax.ws.rs.core.StreamingOutput; +import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -52,21 +55,30 @@ * Used by various WebServices (AHS, ATS). 
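The AppsBlock hunk above adds an "Application Tags" column and, because tags are user supplied, pushes them through the same HTML-then-ECMAScript escaping as the other cells before appending them to the client-side apps array. The null guard and escaping, pulled out into a small standalone sketch (the commons-text import is an assumption about which StringEscapeUtils is in scope):

    import org.apache.commons.text.StringEscapeUtils;

    public final class AppTagsCell {
      private AppTagsCell() { }

      /** Null-safe, doubly escaped value for the new "Application Tags" cell. */
      static String render(String applicationTags) {
        String tags = applicationTags == null ? "" : applicationTags;
        // HTML-escape first, then ECMAScript-escape for embedding in the JS array.
        return StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(tags));
      }
    }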
*/ public class LogServlet extends Configured { + private static final Logger LOG = LoggerFactory .getLogger(LogServlet.class); private static final Joiner JOINER = Joiner.on(""); private static final String NM_DOWNLOAD_URI_STR = "/ws/v1/node/containers"; - private final LogAggregationFileControllerFactory factory; + private LogAggregationFileControllerFactory factoryInstance = null; private final AppInfoProvider appInfoProvider; public LogServlet(Configuration conf, AppInfoProvider appInfoProvider) { super(conf); - this.factory = new LogAggregationFileControllerFactory(conf); this.appInfoProvider = appInfoProvider; } + private LogAggregationFileControllerFactory getOrCreateFactory() { + if (factoryInstance != null) { + return factoryInstance; + } else { + factoryInstance = new LogAggregationFileControllerFactory(getConf()); + return factoryInstance; + } + } + @VisibleForTesting public String getNMWebAddressFromRM(String nodeId) throws ClientHandlerException, UniformInterfaceException, JSONException { @@ -164,7 +176,7 @@ private void validateUserInput(ApplicationId applicationId, public Response getLogsInfo(HttpServletRequest hsr, String appIdStr, String appAttemptIdStr, String containerIdStr, String nmId, - boolean redirectedFromNode) { + boolean redirectedFromNode, boolean manualRedirection) { ApplicationId appId = null; if (appIdStr != null) { try { @@ -201,9 +213,10 @@ public Response getLogsInfo(HttpServletRequest hsr, String appIdStr, .setContainerId(containerIdStr); return getContainerLogsInfo(hsr, logMetaRequestBuilder, nmId, - redirectedFromNode, null); + redirectedFromNode, null, manualRedirection); } + /** * Returns information about the logs for a specific container. * @@ -212,14 +225,16 @@ public Response getLogsInfo(HttpServletRequest hsr, String appIdStr, * @param nmId NodeManager id * @param redirectedFromNode whether the request was redirected * @param clusterId the id of the cluster + * @param manualRedirection whether to return a response with a Location + * instead of an automatic redirection * @return {@link Response} object containing information about the logs */ public Response getContainerLogsInfo(HttpServletRequest req, WrappedLogMetaRequest.Builder builder, String nmId, boolean redirectedFromNode, - String clusterId) { + String clusterId, boolean manualRedirection) { - builder.setFactory(factory); + builder.setFactory(getOrCreateFactory()); BasicAppInfo appInfo; try { @@ -287,6 +302,10 @@ public Response getContainerLogsInfo(HttpServletRequest req, if (query != null && !query.isEmpty()) { resURI += "?" + query; } + if (manualRedirection) { + return createLocationResponse(resURI, createEmptyLogsInfo()); + } + Response.ResponseBuilder response = Response.status( HttpServletResponse.SC_TEMPORARY_REDIRECT); response.header("Location", resURI); @@ -297,6 +316,32 @@ public Response getContainerLogsInfo(HttpServletRequest req, } } + /** + * Creates a response with empty payload and a location header to preserve + * API compatibility. 
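The manualRedirection flag threaded through getLogsInfo and getContainerLogsInfo above changes how the servlet hands a caller off to the owning NodeManager: instead of an automatic 307 redirect, it returns an empty payload with the target URL exposed in the Location header (the corresponding manual_redirection query parameter is added to YarnWebServiceParams further down). A hedged sketch of a client reading that header; the host, port, path and container id are placeholders, only the query parameter comes from this patch:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public final class ManualRedirectionProbe {
      private ManualRedirectionProbe() { }

      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; the real path depends on the deployed AHS/ATS web service.
        URI uri = URI.create("http://timeline.example.com:8188/ws/v1/applicationhistory"
            + "/containers/container_1600000000000_0001_01_000001/logs"
            + "?manual_redirection=true");

        HttpClient client = HttpClient.newBuilder()
            .followRedirects(HttpClient.Redirect.NEVER)   // resolve the hand-off ourselves
            .build();
        HttpResponse<String> response = client.send(
            HttpRequest.newBuilder(uri).GET().build(),
            HttpResponse.BodyHandlers.ofString());

        // With manual_redirection=true the body is empty and the NodeManager URL
        // arrives in the Location header instead of an automatic redirect.
        System.out.println(response.statusCode());
        response.headers().firstValue("Location").ifPresent(System.out::println);
      }
    }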
+ * + * @param uri redirection url + * @param emptyPayload a payload that is discarded + * @return a response with empty payload + */ + private static Response createLocationResponse( + String uri, T emptyPayload) { + Response.ResponseBuilder response = Response.status( + HttpServletResponse.SC_OK).entity(emptyPayload); + response.header("Location", uri); + response.header("Access-Control-Expose-Headers", "Location"); + return response.build(); + } + + private static GenericEntity> createEmptyLogsInfo() { + return new GenericEntity>( + Collections.EMPTY_LIST, List.class); + } + + private static StreamingOutput createEmptyStream() { + return outputStream -> outputStream.write( + "".getBytes(Charset.defaultCharset())); + } /** * Returns an aggregated log file belonging to a container. @@ -309,11 +354,13 @@ public Response getContainerLogsInfo(HttpServletRequest req, * @param nmId NodeManager id * @param redirectedFromNode whether the request was redirected * @param clusterId the id of the cluster + * @param manualRedirection whether to return a response with a Location + * instead of an automatic redirection * @return {@link Response} object containing information about the logs */ public Response getLogFile(HttpServletRequest req, String containerIdStr, String filename, String format, String size, String nmId, - boolean redirectedFromNode, String clusterId) { + boolean redirectedFromNode, String clusterId, boolean manualRedirection) { ContainerId containerId; try { containerId = ContainerId.fromString(containerIdStr); @@ -322,6 +369,8 @@ public Response getLogFile(HttpServletRequest req, String containerIdStr, "Invalid ContainerId: " + containerIdStr); } + LogAggregationFileControllerFactory factory = getOrCreateFactory(); + final long length = LogWebServiceUtils.parseLongParam(size); ApplicationId appId = containerId.getApplicationAttemptId() @@ -388,6 +437,12 @@ public Response getLogFile(HttpServletRequest req, String containerIdStr, if (query != null && !query.isEmpty()) { resURI += "?" 
+ query; } + + + if (manualRedirection) { + return createLocationResponse(resURI, createEmptyStream()); + } + Response.ResponseBuilder response = Response.status( HttpServletResponse.SC_TEMPORARY_REDIRECT); response.header("Location", resURI); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java index 9a9ec77dd75d3..49c72009a4b19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.webapp; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import com.google.inject.Singleton; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; @@ -149,14 +149,16 @@ public Response getContainerLogsInfo(@Context HttpServletRequest req, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) @DefaultValue("false") boolean redirectedFromNode, - @QueryParam(YarnWebServiceParams.CLUSTER_ID) String clusterId) { + @QueryParam(YarnWebServiceParams.CLUSTER_ID) String clusterId, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { initForReadableEndpoints(res); WrappedLogMetaRequest.Builder logMetaRequestBuilder = LogServlet.createRequestFromContainerId(containerIdStr); return logServlet.getContainerLogsInfo(req, logMetaRequestBuilder, nmId, - redirectedFromNode, clusterId); + redirectedFromNode, clusterId, manualRedirection); } @Override @@ -256,9 +258,11 @@ public Response getContainerLogFile( @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) boolean redirectedFromNode, - @QueryParam(YarnWebServiceParams.CLUSTER_ID) String clusterId) { + @QueryParam(YarnWebServiceParams.CLUSTER_ID) String clusterId, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { return getLogs(req, res, containerIdStr, filename, format, size, nmId, - redirectedFromNode, clusterId); + redirectedFromNode, clusterId, manualRedirection); } //TODO: YARN-4993: Refactory ContainersLogsBlock, AggregatedLogsBlock and @@ -278,10 +282,12 @@ public Response getLogs(@Context HttpServletRequest req, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) @DefaultValue("false") boolean redirectedFromNode, - @QueryParam(YarnWebServiceParams.CLUSTER_ID) String clusterId) { + @QueryParam(YarnWebServiceParams.CLUSTER_ID) String clusterId, + @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) + @DefaultValue("false") boolean manualRedirection) { initForReadableEndpoints(res); return logServlet.getLogFile(req, containerIdStr, filename, format, size, - nmId, redirectedFromNode, clusterId); + nmId, redirectedFromNode, clusterId, manualRedirection); } @VisibleForTesting protected TimelineEntity 
getEntity(String path, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java index 10709237eda7f..c4568be8011a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.webapp; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.UniformInterfaceException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java index cf4e020d35ec8..311462bd11c2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java @@ -61,8 +61,8 @@ private static String getAppsTableColumnDefs( // Update following line if any column added in RM page before column 11 sb.append("{'sType':'num-ignore-str', ") .append("'aTargets': [12, 13, 14, 15, 16] },\n"); - // set progress column index to 19 - progressIndex = "[19]"; + // set progress column index to 21 + progressIndex = "[21]"; } else if (isFairSchedulerPage) { // Update following line if any column added in scheduler page before column 11 sb.append("{'sType':'num-ignore-str', ") @@ -112,4 +112,4 @@ public static String resourceRequestsTableInit() { .toString(); } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/YarnWebServiceParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/YarnWebServiceParams.java index c737fc82a24fc..0d9e9f68c1008 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/YarnWebServiceParams.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/YarnWebServiceParams.java @@ -38,4 +38,5 @@ public interface YarnWebServiceParams { String NM_ID = "nm.id"; String REDIRECTED_FROM_NODE = "redirected_from_node"; String CLUSTER_ID = "clusterid"; + String MANUAL_REDIRECTION = "manual_redirection"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java index 
d053f33bd0a00..dab3ae2a81297 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java @@ -28,9 +28,12 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.util.Times; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.StringHelper; @Public @@ -63,8 +66,10 @@ public class AppInfo { protected int priority; private long allocatedCpuVcores; private long allocatedMemoryMB; + private long allocatedGpus; private long reservedCpuVcores; private long reservedMemoryMB; + private long reservedGpus; protected boolean unmanagedApplication; private String appNodeLabelExpression; private String amNodeLabelExpression; @@ -100,24 +105,35 @@ public AppInfo(ApplicationReport app) { if (app.getPriority() != null) { priority = app.getPriority().getPriority(); } - if (app.getApplicationResourceUsageReport() != null) { - runningContainers = app.getApplicationResourceUsageReport() + ApplicationResourceUsageReport usageReport = + app.getApplicationResourceUsageReport(); + if (usageReport != null) { + runningContainers = usageReport .getNumUsedContainers(); - if (app.getApplicationResourceUsageReport().getUsedResources() != null) { - allocatedCpuVcores = app.getApplicationResourceUsageReport() + if (usageReport.getUsedResources() != null) { + allocatedCpuVcores = usageReport .getUsedResources().getVirtualCores(); - allocatedMemoryMB = app.getApplicationResourceUsageReport() + allocatedMemoryMB = usageReport .getUsedResources().getMemorySize(); - reservedCpuVcores = app.getApplicationResourceUsageReport() + reservedCpuVcores = usageReport .getReservedResources().getVirtualCores(); - reservedMemoryMB = app.getApplicationResourceUsageReport() + reservedMemoryMB = usageReport .getReservedResources().getMemorySize(); + Integer gpuIndex = ResourceUtils.getResourceTypeIndex() + .get(ResourceInformation.GPU_URI); + allocatedGpus = -1; + reservedGpus = -1; + if (gpuIndex != null) { + allocatedGpus = usageReport.getUsedResources() + .getResourceValue(ResourceInformation.GPU_URI); + reservedGpus = usageReport.getReservedResources() + .getResourceValue(ResourceInformation.GPU_URI); + } } aggregateResourceAllocation = StringHelper.getResourceSecondsString( - app.getApplicationResourceUsageReport().getResourceSecondsMap()); + usageReport.getResourceSecondsMap()); aggregatePreemptedResourceAllocation = StringHelper - .getResourceSecondsString(app.getApplicationResourceUsageReport() - .getPreemptedResourceSecondsMap()); + .getResourceSecondsString(usageReport.getPreemptedResourceSecondsMap()); } progress = app.getProgress() * 100; // in percent if (app.getApplicationTags() != null && !app.getApplicationTags().isEmpty()) { @@ -176,6 +192,10 @@ public long getAllocatedMemoryMB() { return allocatedMemoryMB; } + public long getAllocatedGpus() { + return allocatedGpus; + } + public long getReservedCpuVcores() { return reservedCpuVcores; } @@ -184,6 +204,10 @@ public long 
getReservedMemoryMB() { return reservedMemoryMB; } + public long getReservedGpus() { + return reservedGpus; + } + public float getProgress() { return progress; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto index ff7153eca8e15..c643179888efe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto @@ -74,6 +74,7 @@ message RegisterNodeManagerRequestProto { optional ResourceProto physicalResource = 9; repeated LogAggregationReportProto log_aggregation_reports_for_apps = 10; optional NodeAttributesProto nodeAttributes = 11; + optional NodeStatusProto nodeStatus = 12; } message RegisterNodeManagerResponseProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index dedabc07d6c96..def59584d0ad0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -262,7 +262,7 @@ private void test(String rpcClass) throws Exception { new DummyContainerManager(), addr, conf, null, 1); server.start(); RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ContainerManagementProtocol proxy = (ContainerManagementProtocol) rpc.getProxy(ContainerManagementProtocol.class, NetUtils.getConnectAddress(server), conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java index 16ba9032b6bf2..93ffd2aeed284 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java @@ -172,7 +172,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; /** * Mock Resource Manager facade implementation that exposes all the methods diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java index 9cfcb28b6a947..f7a51bacf6e09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java @@ -59,7 +59,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestProtocolRecords { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java index 3c036c1812410..e1799d321083c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.yarn.server.federation.policies.router; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -24,6 +25,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; @@ -82,4 +84,31 @@ public void testPickLowestWeight() throws YarnException { Assert.assertEquals("sc5", chosen.getId()); } + @Test + public void testZeroSubClustersWithPositiveWeight() throws Exception { + Map routerWeights = new HashMap<>(); + Map amrmWeights = new HashMap<>(); + // Set negative value to all subclusters + for (int i = 0; i < 5; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i); + + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + routerWeights.put(sc, 0.0f); + amrmWeights.put(sc, -1.0f); + } + getPolicyInfo().setRouterPolicyWeights(routerWeights); + getPolicyInfo().setAMRMPolicyWeights(amrmWeights); + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), getActiveSubclusters()); + + intercept(FederationPolicyException.class, + "No Active Subcluster with weight vector greater than zero", + () -> ((FederationRouterPolicy) getPolicy()) + .getHomeSubcluster(getApplicationSubmissionContext(), 
null)); + } + + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/ContainerBlockTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/ContainerBlockTest.java index bfbd7021a3171..7c20259c3cbc1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/ContainerBlockTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/ContainerBlockTest.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.webapp; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.ApplicationBaseProtocol; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/TestAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/TestAppsBlock.java new file mode 100644 index 0000000000000..6853558130029 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/TestAppsBlock.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.webapp; + +import java.io.OutputStream; +import java.io.PrintWriter; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.io.output.ByteArrayOutputStream; +import org.apache.hadoop.yarn.webapp.SubView; +import org.apache.hadoop.yarn.webapp.YarnWebParams; +import org.apache.hadoop.yarn.webapp.view.BlockForTest; +import org.apache.hadoop.yarn.webapp.view.HtmlBlock; +import org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest; +import org.junit.Test; + +public class TestAppsBlock { + + /** + * Test invalid application state.Exception should be thrown if application + * state is not valid. + */ + @Test(expected = IllegalArgumentException.class) + public void testInvalidAppState() { + AppsBlock appBlock = new AppsBlock(null, null) { + // override this so that apps block can fetch app state. 
+ @Override + public Map moreParams() { + Map map = new HashMap<>(); + map.put(YarnWebParams.APP_STATE, "ACCEPTEDPING"); + return map; + } + + @Override + protected void renderData(Block html) { + } + }; + + // set up the test block to render AppsBlock + OutputStream outputStream = new ByteArrayOutputStream(); + HtmlBlock.Block block = createBlockToCreateTo(outputStream); + + // If application state is invalid it should throw exception + // instead of catching it. + appBlock.render(block); + } + + private static HtmlBlock.Block createBlockToCreateTo( + OutputStream outputStream) { + PrintWriter printWriter = new PrintWriter(outputStream); + HtmlBlock html = new HtmlBlockForTest(); + return new BlockForTest(html, printWriter, 10, false) { + @Override + protected void subView(Class cls) { + } + }; + }; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/TestLogServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/TestLogServlet.java new file mode 100644 index 0000000000000..130e154dc1e07 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/webapp/TestLogServlet.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.webapp; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.Mockito.mock; + +public class TestLogServlet { + /** + * Test that {@link LogServlet}'s constructor does not throw exception, + * if the log aggregation properties are bad. 
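Stepping back to the LogServlet and LogWebService changes earlier in this patch: when a caller adds manual_redirection=true, the service no longer answers with an automatic 307. It returns HTTP 200 with an intentionally empty payload and puts the redirect target in the Location header, which is also listed in Access-Control-Expose-Headers so browser-based callers can read it. A rough, illustrative sketch of a client that resolves the hint itself; the host, port and URL path are placeholders and not part of the patch:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ManualRedirectionLogClient {
      public static void main(String[] args) throws Exception {
        // Hypothetical log endpoint; the real path depends on the deployed service.
        URL entryPoint = new URL("http://logserver.example.com:8188/ws/v2/applicationlog"
            + "/containers/container_1600000000000_0001_01_000001/logs/syslog"
            + "?manual_redirection=true");

        HttpURLConnection probe = (HttpURLConnection) entryPoint.openConnection();
        probe.setInstanceFollowRedirects(false); // resolve the hint ourselves
        int status = probe.getResponseCode();
        String location = probe.getHeaderField("Location");
        probe.disconnect();

        if (status != HttpURLConnection.HTTP_OK || location == null) {
          System.err.println("No redirection hint returned, status=" + status);
          return;
        }

        // The second request goes straight to the server that owns the log.
        HttpURLConnection logConnection =
            (HttpURLConnection) new URL(location).openConnection();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            logConnection.getInputStream(), StandardCharsets.UTF_8))) {
          reader.lines().forEach(System.out::println);
        } finally {
          logConnection.disconnect();
        }
      }
    }

The empty payload keeps the response shape of the existing endpoints, while the exposed Location header is what a CORS-restricted caller would otherwise be unable to see.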
+ */ + @Test + public void testLogServletNoException() { + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS, "22"); + + // first test the factory's constructor throws exception + try { + LogAggregationFileControllerFactory factory = + new LogAggregationFileControllerFactory(conf); + fail("LogAggregationFileControllerFactory should have thrown exception"); + } catch (IllegalArgumentException expected) { + } + + // LogServlet should not throw exception + AppInfoProvider aip = mock(AppInfoProvider.class); + LogServlet ls = new LogServlet(conf, aip); + assertThat(ls).isNotNull(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index ffe4833a5c32c..2504f7a3a9797 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -19,17 +19,18 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-nodemanager - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN NodeManager ${project.parent.parent.basedir} ../etc/hadoop + ../lib/native @@ -88,8 +89,8 @@ javax-websocket-server-impl - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava @@ -199,6 +200,7 @@ ${basedir}/src ${container-executor.conf.dir} + ${extra.libhadoop.rpath} ${sun.arch.data.model} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt index 677429bb99cae..e2cfbc52df213 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt @@ -25,6 +25,8 @@ set(GTEST_SRC_DIR ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoo set(HADOOP_COMMON_SEC_PATH ${HADOOP_COMMON_PATH}/src/main/native/src/org/apache/hadoop/security) +set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) + # determine if container-executor.conf.dir is an absolute # path in case the OS we're compiling on doesn't have # a hook in get_executable. We'll use this define @@ -157,6 +159,18 @@ add_executable(container-executor main/native/container-executor/impl/main.c ) +# By embedding '$ORIGIN' into the RPATH of container-executor, dlopen will look in +# the directory containing container-executor. However, $ORIGIN is not supported by +# all operating systems. +if(${CMAKE_SYSTEM_NAME} MATCHES "Linux|SunOS") + set(RPATH "\$ORIGIN/") + if(EXTRA_LIBHADOOP_RPATH) + set(RPATH "${RPATH}:${EXTRA_LIBHADOOP_RPATH}/") + endif() + message("RPATH SET AS ${RPATH}.") + set_target_properties(container-executor PROPERTIES INSTALL_RPATH "${RPATH}") +endif() + target_link_libraries(container-executor container crypto @@ -168,6 +182,19 @@ output_directory(container-executor target/usr/local/bin) add_executable(test-container-executor main/native/container-executor/test/test-container-executor.c ) + +# By embedding '$ORIGIN' into the RPATH of test-container-executor, dlopen will look in +# the directory containing test-container-executor. However, $ORIGIN is not supported by +# all operating systems. 
+if(${CMAKE_SYSTEM_NAME} MATCHES "Linux|SunOS") + set(RPATH "\$ORIGIN/") + if(EXTRA_LIBHADOOP_RPATH) + set(RPATH "${RPATH}:${EXTRA_LIBHADOOP_RPATH}/") + endif() + message("RPATH SET AS ${RPATH}.") + set_target_properties(test-container-executor PROPERTIES INSTALL_RPATH "${RPATH}") +endif() + target_link_libraries(test-container-executor container ${EXTRA_LIBS} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 58a681f8e43d4..2cb84514f229f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java index c5fc481661bd9..48befe44f7836 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java @@ -21,12 +21,7 @@ import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.OVERWRITE; -import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; -import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.DataOutputStream; import java.io.File; import java.io.FileNotFoundException; @@ -38,7 +33,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Map; - +import java.util.Optional; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.fs.FileContext; @@ -46,6 +41,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; @@ -60,15 +56,16 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The {@code DefaultContainerExecuter} class offers generic container @@ -333,7 +330,7 @@ public int launchContainer(ContainerStartContext ctx) builder.append("Exception from container-launch.\n") .append("Container id: ").append(containerId).append("\n") .append("Exit code: ").append(exitCode).append("\n"); - if (!Optional.fromNullable(e.getMessage()).or("").isEmpty()) { + if (!Optional.ofNullable(e.getMessage()).orElse("").isEmpty()) { builder.append("Exception message: ") .append(e.getMessage()).append("\n"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java index 77bc12330653d..7f50a0758fd45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java @@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; public class DeletionService extends AbstractService { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java index ded1ed1fdcf36..27bdea7b7e460 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java @@ -22,6 +22,7 @@ import java.io.FileNotFoundException; import java.io.IOException; 
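The DirectoryCollection changes that follow add two switches, read once in the constructor, so the disk-utilization check and the free-space check can be disabled independently; when a switch is off, the corresponding branch in testDirs() is skipped and the directory is not marked full for that reason. A minimal sketch of flipping them through configuration, reusing only the YarnConfiguration constants referenced by this patch (the class around them is illustrative):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class DiskThresholdSwitches {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();

        // Keep the free-space check but ignore the utilization-percentage check.
        conf.setBoolean(
            YarnConfiguration.NM_DISK_UTILIZATION_THRESHOLD_ENABLED, false);
        conf.setBoolean(
            YarnConfiguration.NM_DISK_FREE_SPACE_THRESHOLD_ENABLED, true);

        System.out.println("utilization check enabled: " + conf.getBoolean(
            YarnConfiguration.NM_DISK_UTILIZATION_THRESHOLD_ENABLED,
            YarnConfiguration.DEFAULT_NM_DISK_UTILIZATION_THRESHOLD_ENABLED));
      }
    }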
import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -29,7 +30,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; @@ -47,8 +47,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * Manages a list of local storage directories. @@ -59,6 +59,9 @@ public class DirectoryCollection { private final Configuration conf; private final DiskValidator diskValidator; + + private boolean diskUtilizationThresholdEnabled; + private boolean diskFreeSpaceThresholdEnabled; /** * The enum defines disk failure type. */ @@ -239,9 +242,20 @@ public DirectoryCollection(String[] dirs, throw new YarnRuntimeException(e); } - localDirs = new CopyOnWriteArrayList<>(dirs); - errorDirs = new CopyOnWriteArrayList<>(); - fullDirs = new CopyOnWriteArrayList<>(); + diskUtilizationThresholdEnabled = conf. + getBoolean(YarnConfiguration. + NM_DISK_UTILIZATION_THRESHOLD_ENABLED, + YarnConfiguration. + DEFAULT_NM_DISK_UTILIZATION_THRESHOLD_ENABLED); + diskFreeSpaceThresholdEnabled = conf. + getBoolean(YarnConfiguration. + NM_DISK_FREE_SPACE_THRESHOLD_ENABLED, + YarnConfiguration. + DEFAULT_NM_DISK_FREE_SPACE_THRESHOLD_ENABLED); + + localDirs = new ArrayList<>(Arrays.asList(dirs)); + errorDirs = new ArrayList<>(); + fullDirs = new ArrayList<>(); directoryErrorInfo = new ConcurrentHashMap<>(); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); @@ -287,7 +301,7 @@ List getGoodDirs() { List getFailedDirs() { this.readLock.lock(); try { - return ImmutableList.copyOf( + return Collections.unmodifiableList( DirectoryCollection.concat(errorDirs, fullDirs)); } finally { this.readLock.unlock(); @@ -315,7 +329,7 @@ List getFullDirs() { List getErroredDirs() { this.readLock.lock(); try { - return Collections.unmodifiableList(errorDirs); + return ImmutableList.copyOf(errorDirs); } finally { this.readLock.unlock(); } @@ -520,7 +534,9 @@ Map testDirs(List dirs, diskUtilizationPercentageCutoffHigh : diskUtilizationPercentageCutoffLow; long diskFreeSpaceCutoff = goodDirs.contains(dir) ? 
diskFreeSpaceCutoffLow : diskFreeSpaceCutoffHigh; - if (isDiskUsageOverPercentageLimit(testDir, + + if (diskUtilizationThresholdEnabled + && isDiskUsageOverPercentageLimit(testDir, diskUtilizationPercentageCutoff)) { msg = "used space above threshold of " @@ -529,7 +545,8 @@ Map testDirs(List dirs, ret.put(dir, new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg)); continue; - } else if (isDiskFreeSpaceUnderLimit(testDir, diskFreeSpaceCutoff)) { + } else if (diskFreeSpaceThresholdEnabled + && isDiskFreeSpaceUnderLimit(testDir, diskFreeSpaceCutoff)) { msg = "free space below limit of " + diskFreeSpaceCutoff + "MB"; @@ -613,6 +630,28 @@ long getDiskUtilizationSpaceCutoffHigh() { return diskFreeSpaceCutoffHigh; } + @VisibleForTesting + boolean getDiskUtilizationThresholdEnabled() { + return diskUtilizationThresholdEnabled; + } + + @VisibleForTesting + boolean getDiskFreeSpaceThresholdEnabled() { + return diskFreeSpaceThresholdEnabled; + } + + @VisibleForTesting + void setDiskUtilizationThresholdEnabled(boolean + utilizationEnabled) { + diskUtilizationThresholdEnabled = utilizationEnabled; + } + + @VisibleForTesting + void setDiskFreeSpaceThresholdEnabled(boolean + freeSpaceEnabled) { + diskFreeSpaceThresholdEnabled = freeSpaceEnabled; + } + public void setDiskUtilizationSpaceCutoff(long freeSpaceCutoff) { setDiskUtilizationSpaceCutoff(freeSpaceCutoff, freeSpaceCutoff); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 79fb817f97f33..13a0dd0e9ec92 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -18,14 +18,46 @@ package org.apache.hadoop.yarn.server.nodemanager; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; -import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; -import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPLICATION_LOCAL_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LAUNCH_PREFIX_COMMANDS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOCAL_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOG_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_RUN_CMDS; +import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_WORK_DIR; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.FILECACHE_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCALIZED_RESOURCES; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOCAL_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.LOG_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_CONTAINER_SCRIPT_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_KEYSTORE_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_TOKENS_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.NM_PRIVATE_TRUSTSTORE_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.PID; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.PID_FILE_PATH; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RESOURCES_OPTIONS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RUN_AS_USER; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.SIGNAL; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.TC_COMMAND_FILE; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_FILECACHE_DIRS; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_LOCAL_DIRS; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -53,6 +85,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerPrepareContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReacquisitionContext; @@ -64,17 +97,8 @@ import org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler; import org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler; import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; - -import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** *
<p>
    This class provides {@link Container} execution using a native @@ -614,7 +638,7 @@ private int handleExitCode(ContainerExecutionException e, Container container, .append("Container id: " + containerId + "\n") .append("Exit code: " + exitCode + "\n") .append("Exception message: " + e.getMessage() + "\n"); - if (!Optional.fromNullable(e.getErrorOutput()).or("").isEmpty()) { + if (!Optional.ofNullable(e.getErrorOutput()).orElse("").isEmpty()) { builder.append("Shell error output: " + e.getErrorOutput() + "\n"); } //Skip stack trace diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java index 591d2df801722..dfac7099e7a6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java @@ -28,7 +28,7 @@ import java.util.Timer; import java.util.TimerTask; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskValidator; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index f90423cf6b6b4..42944d632b88a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index d4898355868ef..7577b55899781 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -20,8 +20,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; 
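Both executors above switch from Guava's com.google.common.base.Optional to java.util.Optional when assembling the container-launch diagnostics: fromNullable(...).or("") becomes ofNullable(...).orElse(""). A small self-contained illustration of the equivalent idiom; the class and method names are only for demonstration:

    import java.util.Optional;

    public final class OptionalMigrationDemo {

      private OptionalMigrationDemo() {
      }

      // JDK replacement for Guava's Optional.fromNullable(message).or("").
      static String orEmpty(String maybeNull) {
        return Optional.ofNullable(maybeNull).orElse("");
      }

      public static void main(String[] args) {
        // Mirrors the "only append the message if there is one" check above.
        System.out.println(orEmpty(null).isEmpty());          // true
        System.out.println(orEmpty("Shell error").isEmpty()); // false
      }
    }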
+import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuNodeResourceUpdateHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuResourcePlugin; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.slf4j.Logger; @@ -46,9 +49,13 @@ public class NodeResourceMonitorImpl extends AbstractService implements /** Resource calculator. */ private ResourceCalculatorPlugin resourceCalculatorPlugin; - /** Current resource utilization of the node. */ - private ResourceUtilization nodeUtilization; + /** Gpu related plugin. */ + private GpuResourcePlugin gpuResourcePlugin; + private GpuNodeResourceUpdateHandler gpuNodeResourceUpdateHandler; + /** Current resource utilization of the node. */ + private ResourceUtilization nodeUtilization = + ResourceUtilization.newInstance(0, 0, 0f); private Context nmContext; /** @@ -72,6 +79,18 @@ protected void serviceInit(Configuration conf) throws Exception { this.resourceCalculatorPlugin = ResourceCalculatorPlugin.getNodeResourceMonitorPlugin(conf); + if (nmContext.getResourcePluginManager() != null) { + this.gpuResourcePlugin = + (GpuResourcePlugin)nmContext.getResourcePluginManager(). + getNameToPlugins().get(ResourceInformation.GPU_URI); + + if (gpuResourcePlugin != null) { + this.gpuNodeResourceUpdateHandler = + (GpuNodeResourceUpdateHandler)gpuResourcePlugin. + getNodeResourceHandlerInstance(); + } + } + LOG.info(" Using ResourceCalculatorPlugin : " + this.resourceCalculatorPlugin); } @@ -152,6 +171,16 @@ public void run() { (int) (vmem >> 20), // B -> MB vcores); // Used Virtual Cores + float nodeGpuUtilization = 0F; + try { + if (gpuNodeResourceUpdateHandler != null) { + nodeGpuUtilization = + gpuNodeResourceUpdateHandler.getNodeGpuUtilization(); + } + } catch (Exception e) { + LOG.error("Get Node GPU Utilization error: " + e); + } + // Publish the node utilization metrics to node manager // metrics system. 
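The GPU wiring just above, like the AppInfo change earlier in this patch, keys everything off ResourceInformation.GPU_URI: the resource-type index says whether the cluster is tracking GPUs at all, and only then is a GPU value read from a Resource. A compact sketch of that lookup pattern, using only calls this patch already exercises; the helper class itself is hypothetical:

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceInformation;
    import org.apache.hadoop.yarn.util.resource.ResourceUtils;

    public final class GpuValueReader {

      private GpuValueReader() {
      }

      // Returns the GPU count of a Resource, or -1 if GPUs are not configured,
      // matching the -1 sentinel used by AppInfo in this patch.
      public static long gpusOf(Resource resource) {
        Integer gpuIndex = ResourceUtils.getResourceTypeIndex()
            .get(ResourceInformation.GPU_URI);
        if (gpuIndex == null || resource == null) {
          return -1L;
        }
        return resource.getResourceValue(ResourceInformation.GPU_URI);
      }
    }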
NodeManagerMetrics nmMetrics = nmContext.getNodeManagerMetrics(); @@ -159,6 +188,7 @@ public void run() { nmMetrics.setNodeUsedMemGB(nodeUtilization.getPhysicalMemory()); nmMetrics.setNodeUsedVMemGB(nodeUtilization.getVirtualMemory()); nmMetrics.setNodeCpuUtilization(nodeUtilization.getCPU()); + nmMetrics.setNodeGpuUtilization(nodeGpuUtilization); } try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 5e3693ae9c1b6..a98d31c2a2bec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -99,7 +99,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class NodeStatusUpdaterImpl extends AbstractService implements NodeStatusUpdater { @@ -392,12 +392,13 @@ protected void registerWithRM() // during RM recovery synchronized (this.context) { List containerReports = getNMContainerStatuses(); + NodeStatus nodeStatus = getNodeStatus(0); RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(nodeId, httpPort, totalResource, nodeManagerVersionId, containerReports, getRunningApplications(), - nodeLabels, physicalResource, nodeAttributes); + nodeLabels, physicalResource, nodeAttributes, nodeStatus); - if (containerReports != null) { + if (containerReports != null && !containerReports.isEmpty()) { LOG.info("Registering with RM using containers :" + containerReports); } if (logAggregationEnabled) { @@ -405,10 +406,10 @@ nodeManagerVersionId, containerReports, getRunningApplications(), List logAggregationReports = context.getNMLogAggregationStatusTracker() .pullCachedLogAggregationReports(); - LOG.debug("The cache log aggregation status size:{}", - logAggregationReports.size()); if (logAggregationReports != null && !logAggregationReports.isEmpty()) { + LOG.debug("The cache log aggregation status size:{}", + logAggregationReports.size()); request.setLogAggregationReportsForApps(logAggregationReports); } } @@ -623,8 +624,10 @@ protected List getContainerStatuses() throws IOException { } containerStatuses.addAll(pendingCompletedContainers.values()); - LOG.debug("Sending out {} container statuses: {}", - containerStatuses.size(), containerStatuses); + if (!containerStatuses.isEmpty()) { + LOG.debug("Sending out {} container statuses: {}", + containerStatuses.size(), containerStatuses); + } return containerStatuses; } @@ -663,8 +666,10 @@ private List getNMContainerStatuses() throws IOException { addCompletedContainer(containerId); } } - LOG.info("Sending out " + containerStatuses.size() - + " NM container statuses: " + containerStatuses); + if (!containerStatuses.isEmpty()) { + LOG.info("Sending out " + containerStatuses.size() + + " NM container statuses: " + containerStatuses); + } return containerStatuses; } @@ -698,7 +703,7 @@ public void addCompletedContainer(ContainerId containerId) { @VisibleForTesting @Private public void 
removeOrTrackCompletedContainersFromContext( - List containerIds) throws IOException { + List containerIds) { Set removedContainers = new HashSet(); pendingContainersToRemove.addAll(containerIds); @@ -715,13 +720,13 @@ public void removeOrTrackCompletedContainersFromContext( removedContainers.add(containerId); iter.remove(); } + pendingCompletedContainers.remove(containerId); } if (!removedContainers.isEmpty()) { LOG.info("Removed completed containers from NM context: " + removedContainers); } - pendingCompletedContainers.clear(); } private void trackAppsForKeepAlive(List appIds) { @@ -776,8 +781,13 @@ public void removeVeryOldStoppedContainersFromCache() { while (i.hasNext()) { Entry mapEntry = i.next(); ContainerId cid = mapEntry.getKey(); - if (mapEntry.getValue() < currentTime) { - if (!context.getContainers().containsKey(cid)) { + if (mapEntry.getValue() >= currentTime) { + break; + } + if (!context.getContainers().containsKey(cid)) { + ApplicationId appId = + cid.getApplicationAttemptId().getApplicationId(); + if (isApplicationStopped(appId)) { i.remove(); try { context.getNMStateStore().removeContainer(cid); @@ -785,8 +795,6 @@ public void removeVeryOldStoppedContainersFromCache() { LOG.error("Unable to remove container " + cid + " in store", e); } } - } else { - break; } } } @@ -1293,6 +1301,7 @@ private class StatusUpdaterRunnable implements Runnable { @SuppressWarnings("unchecked") public void run() { int lastHeartbeatID = 0; + boolean missedHearbeat = false; while (!isStopped) { // Send heartbeat try { @@ -1346,6 +1355,20 @@ public void run() { removeOrTrackCompletedContainersFromContext(response .getContainersToBeRemovedFromNM()); + // If the last heartbeat was missed, it is possible that the + // RM saw this one as a duplicate and did not process it. + // If so, we can fail to notify the RM of these completed containers + // on the next heartbeat if we clear pendingCompletedContainers. + // If it wasn't a duplicate, the only impact is we might notify + // the RM twice, which it can handle. + if (!missedHearbeat) { + pendingCompletedContainers.clear(); + } else { + LOG.info("skipped clearing pending completed containers due to " + + "missed heartbeat"); + missedHearbeat = false; + } + logAggregationReportForAppsTempList.clear(); lastHeartbeatID = response.getResponseId(); List containersToCleanup = response @@ -1403,6 +1426,9 @@ public void run() { if (newResource != null) { updateNMResource(newResource); LOG.debug("Node's resource is updated to {}", newResource); + if (!totalResource.equals(newResource)) { + LOG.info("Node's resource is updated to {}", newResource); + } } if (timelineServiceV2Enabled) { updateTimelineCollectorData(response); @@ -1422,6 +1448,7 @@ public void run() { // TODO Better error handling. Thread can die with the rest of the // NM still running. LOG.error("Caught exception in status-updater", e); + missedHearbeat = true; } finally { synchronized (heartbeatMonitor) { nextHeartBeatInterval = nextHeartBeatInterval <= 0 ? 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyMetrics.java index a3ace43ac8e1b..068add689b0ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.amrmproxy; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java index d3c4a1d528861..821524dd6619d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java @@ -81,7 +81,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * AMRMProxyService is a service that runs on each node manager that can be used @@ -108,6 +108,8 @@ public class AMRMProxyService extends CompositeService implements private Map applPipelineMap; private RegistryOperations registry; private AMRMProxyMetrics metrics; + private FederationStateStoreFacade federationFacade; + private boolean federationEnabled = false; /** * Creates an instance of the service. @@ -144,7 +146,10 @@ protected void serviceInit(Configuration conf) throws Exception { RegistryOperations.class); addService(this.registry); } - + this.federationFacade = FederationStateStoreFacade.getInstance(); + this.federationEnabled = + conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED, + YarnConfiguration.DEFAULT_FEDERATION_ENABLED); super.serviceInit(conf); } @@ -389,13 +394,22 @@ public void processApplicationStartRequest(StartContainerRequest request) throws IOException, YarnException { long startTime = clock.getTime(); try { - LOG.info("Callback received for initializing request " - + "processing pipeline for an AM"); ContainerTokenIdentifier containerTokenIdentifierForKey = BuilderUtils.newContainerTokenIdentifier(request.getContainerToken()); ApplicationAttemptId appAttemptId = containerTokenIdentifierForKey.getContainerID() .getApplicationAttemptId(); + ApplicationId applicationID = appAttemptId.getApplicationId(); + // Checking if application is there in federation state store only + // if federation is enabled. 
If + // application is submitted to router then it adds it in statestore. + // if application is not found in statestore that means its + // submitted to RM + if (!checkIfAppExistsInStateStore(applicationID)) { + return; + } + LOG.info("Callback received for initializing request " + + "processing pipeline for an AM"); Credentials credentials = YarnServerSecurityUtils .parseCredentials(request.getContainerLaunchContext()); @@ -772,6 +786,21 @@ private RequestInterceptorChainWrapper getInterceptorChain( } } + boolean checkIfAppExistsInStateStore(ApplicationId applicationID) { + if (!federationEnabled) { + return true; + } + + try { + // Check if app is there in state store. If app is not there then it + // throws Exception + this.federationFacade.getApplicationHomeSubCluster(applicationID); + } catch (YarnException ex) { + return false; + } + return true; + } + @SuppressWarnings("unchecked") private Token getFirstAMRMToken( Collection> allTokens) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java index 711682dcc4ceb..de73fc65e67f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState; import org.apache.hadoop.yarn.server.security.MasterKeyData; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This secret manager instance is used by the AMRMProxyService to generate and diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java index 93b42cb5e560b..cfc44f72afd5e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Map; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.exceptions.YarnException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java 
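The AMRMProxyService hunks above gate pipeline initialization on a federation state store lookup: when federation is enabled, an application submitted through the Router has a home sub-cluster entry, while one submitted directly to an RM does not, so the proxy skips it. A hedged sketch of that gate, with a hypothetical AppRegistry interface standing in for FederationStateStoreFacade.getApplicationHomeSubCluster:

    /** Sketch of the "only proxy Router-submitted applications" gate. */
    public class FederationGateSketch {

      /** Hypothetical lookup; the real facade call throws if the application is unknown. */
      interface AppRegistry {
        void assertKnown(String applicationId) throws Exception;
      }

      private final boolean federationEnabled;
      private final AppRegistry registry;

      FederationGateSketch(boolean federationEnabled, AppRegistry registry) {
        this.federationEnabled = federationEnabled;
        this.registry = registry;
      }

      /** Returns true when the request-processing pipeline should be initialized. */
      boolean shouldInitPipeline(String applicationId) {
        if (!federationEnabled) {
          return true;               // non-federated clusters proxy every AM
        }
        try {
          registry.assertKnown(applicationId);
          return true;               // the Router registered the app in the state store
        } catch (Exception unknownApp) {
          return false;              // submitted straight to an RM; skip the proxy
        }
      }

      public static void main(String[] args) {
        AppRegistry known = appId -> { };
        AppRegistry unknown = appId -> { throw new Exception("not found"); };
        System.out.println(new FederationGateSketch(true, known)
            .shouldInitPipeline("application_1_0001"));   // true
        System.out.println(new FederationGateSketch(true, unknown)
            .shouldInitPipeline("application_1_0001"));   // false
      }
    }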
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java index d6deca024e6fb..7ae28ef1a34ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java @@ -22,7 +22,7 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; @@ -51,7 +51,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Extends the AbstractRequestInterceptor class and provides an implementation diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java index 5d165c995843f..e95594ca46cfc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java @@ -95,8 +95,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Extends the AbstractRequestInterceptor and provides an implementation for diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/ResourceLocalizationSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/ResourceLocalizationSpec.java index 63c3fd3fb8b6b..2d7560709e16d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/ResourceLocalizationSpec.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/ResourceLocalizationSpec.java @@ -21,7 +21,7 @@ import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.URL; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Private @VisibleForTesting diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java index 154052208c18d..124211cc7a486 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java @@ -22,7 +22,7 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -42,7 +42,8 @@ public class LocalizationProtocolPBClientImpl implements LocalizationProtocol, private LocalizationProtocolPB proxy; public LocalizationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, + ProtobufRpcEngine2.class); proxy = (LocalizationProtocolPB)RPC.getProxy( LocalizationProtocolPB.class, clientVersion, addr, conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java index 95d701c1524c2..b2e76cbc04234 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java @@ -40,7 +40,7 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.server.nodemanager.containermanager.records.AuxServiceConfiguration; @@ -80,7 +80,7 @@ import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask; import org.apache.hadoop.yarn.util.FSDownload; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; public class AuxServices extends AbstractService implements ServiceStateChangeListener, EventHandler { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 1758028b60408..5eb36ba558ea0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest; @@ -212,7 +212,7 @@ private enum ReInitOp { private final ResourceLocalizationService rsrcLocalizationSrvc; private final AbstractContainersLauncher containersLauncher; private final AuxServices auxiliaryServices; - private final NodeManagerMetrics metrics; + @VisibleForTesting final NodeManagerMetrics metrics; protected final NodeStatusUpdater nodeStatusUpdater; @@ -1082,8 +1082,10 @@ protected void startContainerInternal( ContainerId containerId = containerTokenIdentifier.getContainerID(); String containerIdStr = containerId.toString(); String user = containerTokenIdentifier.getApplicationSubmitter(); + Resource containerResource = containerTokenIdentifier.getResource(); - LOG.info("Start request for " + containerIdStr + " by user " + remoteUser); + LOG.info("Start request for " + containerIdStr + " by user " + remoteUser + + " with resource " + containerResource); ContainerLaunchContext launchContext = request.getContainerLaunchContext(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java index e519982151f33..8fe9651045c49 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java @@ -26,7 +26,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java index f8bb9b00b9493..0c06aae95327b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java @@ -138,4 +138,14 @@ T getContainerRuntimeData(Class runtimeClazz) * @return localization statuses. */ List getLocalizationStatuses(); + + /** + * Vector of localization counters to be passed from NM to application + * container via environment variable {@code $LOCALIZATION_COUNTERS}. See + * {@link org.apache.hadoop.yarn.api.ApplicationConstants.Environment#LOCALIZATION_COUNTERS} + * + * @return coma-separated counter values + */ + String localizationCountersAsString(); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index f253b041d328b..8ad9784fc4043 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -23,6 +23,7 @@ import java.net.URISyntaxException; import java.nio.ByteBuffer; import java.text.SimpleDateFormat; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; @@ -42,10 +43,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -100,6 +102,14 @@ import org.apache.hadoop.yarn.util.resource.Resources; public class ContainerImpl implements Container { + private enum LocalizationCounter { + // 1-to-1 correspondence with MR TaskCounter.LOCALIZED_* + BYTES_MISSED, + BYTES_CACHED, + FILES_MISSED, + FILES_CACHED, + MILLIS; + } private static final class ReInitializationContext { private final ContainerLaunchContext newLaunchContext; @@ -153,6 +163,9 @@ private ReInitializationContext createContextForRollback() { private final NMStateStoreService stateStore; private final Credentials credentials; private final NodeManagerMetrics metrics; + private final long[] localizationCounts = + new long[LocalizationCounter.values().length]; + private volatile ContainerLaunchContext launchContext; private volatile ContainerTokenIdentifier containerTokenIdentifier; private final ContainerId containerId; @@ -1211,6 
+1224,12 @@ public ContainerState transition(ContainerImpl container, } container.containerLocalizationStartTime = clock.getTime(); + // duration = end - start; + // record in RequestResourcesTransition: -start + // add in LocalizedTransition: +end + // + container.localizationCounts[LocalizationCounter.MILLIS.ordinal()] + = -Time.monotonicNow(); // Send requests for public, private resources Map cntrRsrc; @@ -1222,6 +1241,15 @@ public ContainerState transition(ContainerImpl container, container.resourceSet.addResources(ctxt.getLocalResources()); container.dispatcher.getEventHandler().handle( new ContainerLocalizationRequestEvent(container, req)); + // Get list of resources for logging + List resourcePaths = new ArrayList<>(); + for (Collection rsrcReqList : req.values()) { + for (LocalResourceRequest rsrc : rsrcReqList) { + resourcePaths.add(rsrc.getPath().toString()); + } + } + LOG.info("Container " + container.getContainerId() + + " is localizing: " + resourcePaths); return ContainerState.LOCALIZING; } else { container.sendScheduleEvent(); @@ -1259,6 +1287,21 @@ public ContainerState transition(ContainerImpl container, return ContainerState.LOCALIZING; } + final long localizedSize = rsrcEvent.getSize(); + if (localizedSize > 0) { + container.localizationCounts + [LocalizationCounter.BYTES_MISSED.ordinal()] += localizedSize; + container.localizationCounts + [LocalizationCounter.FILES_MISSED.ordinal()]++; + } else if (localizedSize < 0) { + // cached: recorded negative, restore the sign + container.localizationCounts + [LocalizationCounter.BYTES_CACHED.ordinal()] -= localizedSize; + container.localizationCounts + [LocalizationCounter.FILES_CACHED.ordinal()]++; + } + container.metrics.localizationCacheHitMiss(localizedSize); + // check to see if this resource should be uploaded to the shared cache // as well if (shouldBeUploadedToSharedCache(container, resourceRequest)) { @@ -1269,6 +1312,14 @@ public ContainerState transition(ContainerImpl container, return ContainerState.LOCALIZING; } + // duration = end - start; + // record in RequestResourcesTransition: -start + // add in LocalizedTransition: +end + // + container.localizationCounts[LocalizationCounter.MILLIS.ordinal()] + += Time.monotonicNow(); + container.metrics.localizationComplete( + container.localizationCounts[LocalizationCounter.MILLIS.ordinal()]); container.dispatcher.getEventHandler().handle( new ContainerLocalizationEvent(LocalizationEventType. 
CONTAINER_RESOURCES_LOCALIZED, container)); @@ -2301,4 +2352,14 @@ public T getContainerRuntimeData(Class runtimeClass) } return runtimeClass.cast(containerRuntimeData); } + + @Override + public String localizationCountersAsString() { + StringBuilder result = + new StringBuilder(String.valueOf(localizationCounts[0])); + for (int i = 1; i < localizationCounts.length; i++) { + result.append(',').append(localizationCounts[i]); + } + return result.toString(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java index 4b742b14330e4..d5bcaa286e864 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerResourceLocalizedEvent.java @@ -25,6 +25,11 @@ public class ContainerResourceLocalizedEvent extends ContainerResourceEvent { private final Path loc; + // > 0: downloaded + // < 0: cached + // + private long size; + public ContainerResourceLocalizedEvent(ContainerId container, LocalResourceRequest rsrc, Path loc) { super(container, ContainerEventType.RESOURCE_LOCALIZED, rsrc); @@ -35,4 +40,12 @@ public Path getLocation() { return loc; } + public long getSize() { + return size; + } + + public void setSize(long size) { + this.size = size; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java index 957764fe4b38a..70b28d4fcaa79 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.ContainerRetryContext; import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java index 0ceb5146c1f00..4aabff7fdad9a 100644 
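The counter bookkeeping above relies on two small conventions: a freshly downloaded resource is reported with a positive size and a cache hit with a negated size, and the localization duration is accumulated as (-start) at request time plus (+end) at completion, so MILLIS ends up holding end minus start. The counters are exported to the container as a comma-separated list in the LOCALIZATION_COUNTERS environment variable, in enum order (BYTES_MISSED, BYTES_CACHED, FILES_MISSED, FILES_CACHED, MILLIS). A hedged sketch of how a launched process might read them back; the variable name comes from the patch, the parsing code is illustrative only:

    /** Illustrative reader for the LOCALIZATION_COUNTERS value written by the NM. */
    public final class LocalizationCountersReader {

      // Same order as ContainerImpl.LocalizationCounter in the hunks above.
      enum Counter { BYTES_MISSED, BYTES_CACHED, FILES_MISSED, FILES_CACHED, MILLIS }

      public static void main(String[] args) {
        String raw = System.getenv("LOCALIZATION_COUNTERS");
        if (raw == null || raw.isEmpty()) {
          System.out.println("no localization counters exported");
          return;
        }
        String[] parts = raw.split(",");
        for (Counter c : Counter.values()) {
          long value = c.ordinal() < parts.length
              ? Long.parseLong(parts[c.ordinal()].trim()) : 0L;
          System.out.println(c + " = " + value);
        }
      }
    }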
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index 41e26d4471362..e864c14ad7092 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -96,7 +96,7 @@ import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -179,15 +179,36 @@ public static String expandEnvironment(String var, return var; } - private Map expandAllEnvironmentVars( - ContainerLaunchContext launchContext, Path containerLogDir) { - Map environment = launchContext.getEnvironment(); + private void expandAllEnvironmentVars( + Map environment, Path containerLogDir) { for (Entry entry : environment.entrySet()) { String value = entry.getValue(); value = expandEnvironment(value, containerLogDir); entry.setValue(value); } - return environment; + } + + private void addKeystoreVars(Map environment, + Path containerWorkDir) { + environment.put(ApplicationConstants.KEYSTORE_FILE_LOCATION_ENV_NAME, + new Path(containerWorkDir, + ContainerLaunch.KEYSTORE_FILE).toUri().getPath()); + environment.put(ApplicationConstants.KEYSTORE_PASSWORD_ENV_NAME, + new String(container.getCredentials().getSecretKey( + AMSecretKeys.YARN_APPLICATION_AM_KEYSTORE_PASSWORD), + StandardCharsets.UTF_8)); + } + + private void addTruststoreVars(Map environment, + Path containerWorkDir) { + environment.put( + ApplicationConstants.TRUSTSTORE_FILE_LOCATION_ENV_NAME, + new Path(containerWorkDir, + ContainerLaunch.TRUSTSTORE_FILE).toUri().getPath()); + environment.put(ApplicationConstants.TRUSTSTORE_PASSWORD_ENV_NAME, + new String(container.getCredentials().getSecretKey( + AMSecretKeys.YARN_APPLICATION_AM_TRUSTSTORE_PASSWORD), + StandardCharsets.UTF_8)); } @Override @@ -222,8 +243,10 @@ public Integer call() { } launchContext.setCommands(newCmds); - Map environment = expandAllEnvironmentVars( - launchContext, containerLogDir); + // The actual expansion of environment variables happens after calling + // sanitizeEnv. 
This allows variables specified in NM_ADMIN_USER_ENV + // to reference user or container-defined variables. + Map environment = launchContext.getEnvironment(); // /////////////////////////// End of variable expansion // Use this to track variables that are added to the environment by nm. @@ -289,13 +312,6 @@ public Integer call() { lfs.create(nmPrivateKeystorePath, EnumSet.of(CREATE, OVERWRITE))) { keystoreOutStream.write(keystore); - environment.put(ApplicationConstants.KEYSTORE_FILE_LOCATION_ENV_NAME, - new Path(containerWorkDir, - ContainerLaunch.KEYSTORE_FILE).toUri().getPath()); - environment.put(ApplicationConstants.KEYSTORE_PASSWORD_ENV_NAME, - new String(container.getCredentials().getSecretKey( - AMSecretKeys.YARN_APPLICATION_AM_KEYSTORE_PASSWORD), - StandardCharsets.UTF_8)); } } else { nmPrivateKeystorePath = null; @@ -307,14 +323,6 @@ public Integer call() { lfs.create(nmPrivateTruststorePath, EnumSet.of(CREATE, OVERWRITE))) { truststoreOutStream.write(truststore); - environment.put( - ApplicationConstants.TRUSTSTORE_FILE_LOCATION_ENV_NAME, - new Path(containerWorkDir, - ContainerLaunch.TRUSTSTORE_FILE).toUri().getPath()); - environment.put(ApplicationConstants.TRUSTSTORE_PASSWORD_ENV_NAME, - new String(container.getCredentials().getSecretKey( - AMSecretKeys.YARN_APPLICATION_AM_TRUSTSTORE_PASSWORD), - StandardCharsets.UTF_8)); } } else { nmPrivateTruststorePath = null; @@ -335,6 +343,16 @@ public Integer call() { containerLogDirs, localResources, nmPrivateClasspathJarDir, nmEnvVars); + expandAllEnvironmentVars(environment, containerLogDir); + + // Add these if needed after expanding so we don't expand key values. + if (keystore != null) { + addKeystoreVars(environment, containerWorkDir); + } + if (truststore != null) { + addTruststoreVars(environment, containerWorkDir); + } + prepareContainer(localResources, containerLocalDirs); // Write out the environment @@ -1620,18 +1638,42 @@ public void sanitizeEnv(Map environment, Path pwd, addToEnvMap(environment, nmVars, Environment.PWD.name(), pwd.toString()); + addToEnvMap(environment, nmVars, Environment.LOCALIZATION_COUNTERS.name(), + container.localizationCountersAsString()); + if (!Shell.WINDOWS) { addToEnvMap(environment, nmVars, "JVM_PID", "$$"); } // variables here will be forced in, even if the container has - // specified them. + // specified them. Note: we do not track these in nmVars, to + // allow them to be ordered properly if they reference variables + // defined by the user. 
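The ContainerLaunch changes above deliberately reorder environment handling: expansion runs only after sanitizeEnv has merged in the NM- and admin-defined entries, so values from NM_ADMIN_USER_ENV can reference user- or container-defined variables, and the keystore/truststore entries are added after expansion so the secret values never pass through the expander. A minimal sketch of that ordering under those assumptions; the {{NAME}} syntax, expandAll helper, and KEYSTORE_PASSWORD key are illustrative stand-ins, not the real API:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    /** Sketch of the launch-time ordering: merge, then expand, then add secrets. */
    public class EnvOrderingSketch {

      private static final Pattern REF = Pattern.compile("\\{\\{(\\w+)\\}\\}");

      /** Hypothetical expansion: resolves {{NAME}} references against the map itself. */
      static void expandAll(Map<String, String> env) {
        env.replaceAll((key, value) -> {
          Matcher m = REF.matcher(value);
          StringBuffer out = new StringBuffer();
          while (m.find()) {
            m.appendReplacement(out,
                Matcher.quoteReplacement(env.getOrDefault(m.group(1), "")));
          }
          m.appendTail(out);
          return out.toString();
        });
      }

      static Map<String, String> buildEnvironment(Map<String, String> userEnv,
          Map<String, String> adminEnv, String keystorePassword) {
        Map<String, String> env = new HashMap<>(userEnv);
        env.putAll(adminEnv);              // admin entries may reference user entries
        expandAll(env);                    // expansion sees both sets, as in the patch
        env.put("KEYSTORE_PASSWORD", keystorePassword); // secret added after expansion
        return env;
      }
    }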
String defEnvStr = conf.get(YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV); Apps.setEnvFromInputProperty(environment, YarnConfiguration.NM_ADMIN_USER_ENV, defEnvStr, conf, File.pathSeparator); - nmVars.addAll(Apps.getEnvVarsFromInputProperty( - YarnConfiguration.NM_ADMIN_USER_ENV, defEnvStr, conf)); + + if (!Shell.WINDOWS) { + // maybe force path components + String forcePath = conf.get(YarnConfiguration.NM_ADMIN_FORCE_PATH, + YarnConfiguration.DEFAULT_NM_ADMIN_FORCE_PATH); + if (!forcePath.isEmpty()) { + String userPath = environment.get(Environment.PATH.name()); + environment.remove(Environment.PATH.name()); + if (userPath == null || userPath.isEmpty()) { + Apps.addToEnvironment(environment, Environment.PATH.name(), + forcePath, File.pathSeparator); + Apps.addToEnvironment(environment, Environment.PATH.name(), + "$PATH", File.pathSeparator); + } else { + Apps.addToEnvironment(environment, Environment.PATH.name(), + forcePath, File.pathSeparator); + Apps.addToEnvironment(environment, Environment.PATH.name(), + userPath, File.pathSeparator); + } + } + } // TODO: Remove Windows check and use this approach on all platforms after // additional testing. See YARN-358. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java index 1f7e8a7e1a74c..4988e54821de1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java @@ -47,8 +47,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * The launcher for the containers. 
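On non-Windows nodes, the YarnConfiguration.NM_ADMIN_FORCE_PATH handling above rebuilds PATH so the admin-forced components always come first: if the container supplied its own PATH, the forced components are prepended to it; otherwise they are prepended to the literal $PATH so the shell's default still applies. A small sketch of the resulting composition (the joinPath helper is hypothetical; the ':' in the example output assumes Linux):

    import java.io.File;

    /** Sketch of how an admin-forced prefix is combined with the container's PATH. */
    public final class ForcedPathSketch {

      /** Hypothetical join helper using the platform path separator. */
      static String joinPath(String first, String second) {
        return first + File.pathSeparator + second;
      }

      static String composePath(String forcedPath, String userPath) {
        if (forcedPath == null || forcedPath.isEmpty()) {
          return userPath;                       // feature disabled, leave PATH alone
        }
        if (userPath == null || userPath.isEmpty()) {
          return joinPath(forcedPath, "$PATH");  // fall back to the shell's own PATH
        }
        return joinPath(forcedPath, userPath);   // forced components win over the user's
      }

      public static void main(String[] args) {
        System.out.println(composePath("/opt/hadoop/bin", "/usr/local/bin:/usr/bin"));
        // -> /opt/hadoop/bin:/usr/local/bin:/usr/bin
      }
    }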
This service should be started only after diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java index e6a5999f7c19d..dc7f354d1a836 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java index 140484b157b36..eab4c0b7744af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java index e0711dfd83bd8..fd52310fbf3e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import 
com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java index 6a87ede0512ae..bb9aed693c19a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java index 40c41bd2026ee..8c2432d8dd806 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java index 0b25db4ecc78d..06b47973e0374 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.util.CpuTimeTracker; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java index 6d74809dcc1e9..9d0deea55bbd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java index 082eb09407888..906f54df29b16 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java @@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.ArrayList; import java.util.List; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java index eba0ce1deeb17..e59454b9394e2 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingJsonManager.java @@ -19,7 +19,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.File; import java.util.LinkedList; import java.util.List; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java index 3292f94c434e0..13027c3fdcf1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerChain.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java index 2019417dd8890..f322a9b3c52c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java index 85d79ec9e1606..66bc474325989 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java index 4eb58f2cd8ca8..11a7b9f90d904 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java @@ -48,7 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @InterfaceStability.Unstable @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java index 23007765b1fd9..555cb68dc833b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java @@ -18,11 +18,11 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java index e91ac3e041e05..72ab7364bd150 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.numa; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import java.io.Serializable; import java.util.Map; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java index ac55e2f9b31dd..0ffffbe6a336e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java @@ -29,7 +29,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -44,7 +44,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * NUMA Resources Allocator reads the NUMA topology and assigns NUMA nodes to diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java index f0f54ef4758fe..57c13568b7cef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index 5885290979446..8283e062f2e56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.authorize.AccessControlList; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java index 530f66e4d0157..a60cb5b7fa30a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java index 12425073e9a90..707b5e6c0b771 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java index 87dfcd27f7a81..da18509921e5b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Encapsulates the docker images command and its command diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/HdfsManifestToResourcesPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/HdfsManifestToResourcesPlugin.java index db9c19c379d2a..6dcd366a2062d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/HdfsManifestToResourcesPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/HdfsManifestToResourcesPlugin.java @@ -19,9 +19,9 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc; -import com.google.common.cache.CacheBuilder; 
-import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 07ed9fa5a8c56..5916906f5a39d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.util.Shell.getAllShells; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,8 +84,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier; import org.apache.hadoop.yarn.util.FSDownload; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; public class ContainerLocalizer { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalCacheDirectoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalCacheDirectoryManager.java index 4649e0b892734..43f8379a18e0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalCacheDirectoryManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalCacheDirectoryManager.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * {@link LocalCacheDirectoryManager} is used for managing hierarchical diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java index ad24c62828fea..0487454afd7e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java @@ -47,7 +47,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java index a75a13e956b06..8a8a49ab04399 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java @@ -244,9 +244,11 @@ public void transition(LocalizedResource rsrc, ResourceEvent event) { Path.getPathWithoutSchemeAndAuthority(locEvent.getLocation()); rsrc.size = locEvent.getSize(); for (ContainerId container : rsrc.ref) { - rsrc.dispatcher.getEventHandler().handle( + final ContainerResourceLocalizedEvent localizedEvent = new ContainerResourceLocalizedEvent( - container, rsrc.rsrc, rsrc.localPath)); + container, rsrc.rsrc, rsrc.localPath); + localizedEvent.setSize(rsrc.size); + rsrc.dispatcher.getEventHandler().handle(localizedEvent); } } } @@ -281,9 +283,11 @@ public void transition(LocalizedResource rsrc, ResourceEvent event) { ResourceRequestEvent reqEvent = (ResourceRequestEvent) event; ContainerId container = reqEvent.getContext().getContainerId(); rsrc.ref.add(container); - rsrc.dispatcher.getEventHandler().handle( + final ContainerResourceLocalizedEvent localizedEvent = new ContainerResourceLocalizedEvent( - container, rsrc.rsrc, rsrc.localPath)); + container, rsrc.rsrc, rsrc.localPath); + localizedEvent.setSize(-rsrc.size); + rsrc.dispatcher.getEventHandler().handle(localizedEvent); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java index 6c4919fc3aa1f..6e1b9ea2bac0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java 
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizerContext.java @@ -25,7 +25,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.yarn.api.records.ContainerId; -import com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; public class LocalizerContext { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index b5403e8764b1a..b270704e5f93a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -143,10 +143,10 @@ import org.apache.hadoop.yarn.util.FSDownload; import org.apache.hadoop.yarn.util.LRUCacheHashMap; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.LoadingCache; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; +import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; public class ResourceLocalizationService extends CompositeService implements EventHandler, LocalizationProtocol { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java index 9afbf3fde8c9b..e54882193afa4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java @@ -39,7 +39,7 @@ import org.apache.hadoop.yarn.server.api.SCMUploaderProtocol; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @Private @Unstable diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java index 23aa5b3aa8fe1..227668131a695 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java @@ -47,7 +47,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.FSDownload; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The callable class that handles the actual upload to the shared cache. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java index 245dc103e933a..4cc0dc3c71369 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java @@ -28,6 +28,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,10 +72,8 @@ import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Times; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class AppLogAggregatorImpl implements AppLogAggregator { @@ -663,16 +662,9 @@ public Set doContainerLogAggregation( .getCurrentUpLoadedFileMeta()); // if any of the previous uploaded logs have been deleted, // we need to remove them from alreadyUploadedLogs - Iterable mask = - Iterables.filter(uploadedFileMeta, new Predicate() { - @Override - public boolean apply(String next) { - return logValue.getAllExistingFilesMeta().contains(next); - } - }); - - this.uploadedFileMeta = Sets.newHashSet(mask); - + this.uploadedFileMeta = uploadedFileMeta.stream().filter( + next -> logValue.getAllExistingFilesMeta().contains(next)).collect( + Collectors.toSet()); // need to return files uploaded or older-than-retention clean up. 
return Sets.union(logValue.getCurrentUpLoadedFilesPath(), logValue.getObsoleteRetentionLogFiles()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java index 2280e750f8853..9596b14fa2dda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java @@ -63,8 +63,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; public class LogAggregationService extends AbstractService implements LogHandler { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java index 9898f8f797921..7d55f2c108c5e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java @@ -55,7 +55,7 @@ import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLogDeleterState; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Log Handler which schedules deletion of log files based on the configured log diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 87929101928d9..a83ae3ad036fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; @@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -45,11 +46,14 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher; import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; +import org.apache.hadoop.yarn.server.nodemanager.webapp.ContainerLogsUtils; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree; import java.util.Arrays; +import java.io.File; import java.util.Map; +import java.util.List; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; @@ -67,6 +71,10 @@ public class ContainersMonitorImpl extends AbstractService implements private long monitoringInterval; private MonitoringThread monitoringThread; + private int logCheckInterval; + private LogMonitorThread logMonitorThread; + private long logDirSizeLimit; + private long logTotalSizeLimit; private CGroupElasticMemoryController oomListenerThread; private boolean containerMetricsEnabled; private long containerMetricsPeriodMs; @@ -94,6 +102,7 @@ public class ContainersMonitorImpl extends AbstractService implements private boolean elasticMemoryEnforcement; private boolean strictMemoryEnforcement; private boolean containersMonitorEnabled; + private boolean logMonitorEnabled; private long maxVCoresAllottedForContainers; @@ -122,6 +131,8 @@ public ContainersMonitorImpl(ContainerExecutor exec, this.monitoringThread = new MonitoringThread(); + this.logMonitorThread = new LogMonitorThread(); + this.containersUtilization = ResourceUtilization.newInstance(0, 0, 0.0f); } @@ -133,6 +144,16 @@ protected void serviceInit(Configuration myConf) throws Exception { this.conf.getLong(YarnConfiguration.NM_RESOURCE_MON_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_RESOURCE_MON_INTERVAL_MS)); + this.logCheckInterval = + conf.getInt(YarnConfiguration.NM_CONTAINER_LOG_MON_INTERVAL_MS, + YarnConfiguration.DEFAULT_NM_CONTAINER_LOG_MON_INTERVAL_MS); + this.logDirSizeLimit = + conf.getLong(YarnConfiguration.NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES, + YarnConfiguration.DEFAULT_NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES); + this.logTotalSizeLimit = + conf.getLong(YarnConfiguration.NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES, + 
YarnConfiguration.DEFAULT_NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES); + this.resourceCalculatorPlugin = ResourceCalculatorPlugin.getContainersMonitorPlugin(this.conf); LOG.info("Using ResourceCalculatorPlugin: {}", @@ -214,6 +235,11 @@ protected void serviceInit(Configuration myConf) throws Exception { isContainerMonitorEnabled() && monitoringInterval > 0; LOG.info("ContainersMonitor enabled: {}", containersMonitorEnabled); + logMonitorEnabled = + conf.getBoolean(YarnConfiguration.NM_CONTAINER_LOG_MONITOR_ENABLED, + YarnConfiguration.DEFAULT_NM_CONTAINER_LOG_MONITOR_ENABLED); + LOG.info("Container Log Monitor Enabled: "+ logMonitorEnabled); + nodeCpuPercentageForYARN = NodeManagerHardwareUtils.getNodeCpuPercentage(this.conf); @@ -284,13 +310,16 @@ protected void serviceStart() throws Exception { if (oomListenerThread != null) { oomListenerThread.start(); } + if (logMonitorEnabled) { + this.logMonitorThread.start(); + } super.serviceStart(); } @Override protected void serviceStop() throws Exception { + stopped = true; if (containersMonitorEnabled) { - stopped = true; this.monitoringThread.interrupt(); try { this.monitoringThread.join(); @@ -306,6 +335,13 @@ protected void serviceStop() throws Exception { } } } + if (logMonitorEnabled) { + this.logMonitorThread.interrupt(); + try { + this.logMonitorThread.join(); + } catch (InterruptedException e) { + } + } super.serviceStop(); } @@ -648,15 +684,20 @@ private void recordUsage(ContainerId containerId, String pId, long vmemLimit = ptInfo.getVmemLimit(); long pmemLimit = ptInfo.getPmemLimit(); if (AUDITLOG.isDebugEnabled()) { + int vcoreLimit = ptInfo.getCpuVcores(); + long cumulativeCpuTime = pTree.getCumulativeCpuTime(); AUDITLOG.debug( "Resource usage of ProcessTree {} for container-id {}:" + - " {} CPU:{} CPU/core:{}", + " {} %CPU: {} %CPU-cores: {}" + + " vCores-used: {} of {} Cumulative-CPU-ms: {}", pId, containerId, formatUsageString( currentVmemUsage, vmemLimit, currentPmemUsage, pmemLimit), cpuUsagePercentPerCore, - cpuUsageTotalCoresPercentage); + cpuUsageTotalCoresPercentage, + milliVcoresUsed / 1000, vcoreLimit, + cumulativeCpuTime); } // Add resource utilization for this container @@ -747,7 +788,8 @@ && isProcessTreeOverLimit(containerId.toString(), containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM; } - if (isMemoryOverLimit) { + if (isMemoryOverLimit + && trackingContainers.remove(containerId) != null) { // Virtual or physical memory over limit. 
Fail the container and // remove // the corresponding process tree @@ -761,7 +803,6 @@ && isProcessTreeOverLimit(containerId.toString(), eventDispatcher.getEventHandler().handle( new ContainerKillEvent(containerId, containerExitStatus, msg)); - trackingContainers.remove(containerId); LOG.info("Removed ProcessTree with root {}", pId); } } @@ -829,6 +870,72 @@ private String formatUsageString(long currentVmemUsage, long vmemLimit, } } + private class LogMonitorThread extends Thread { + LogMonitorThread() { + super("Container Log Monitor"); + } + + @Override + public void run() { + while (!stopped && !Thread.currentThread().isInterrupted()) { + for (Entry<ContainerId, ProcessTreeInfo> entry : + trackingContainers.entrySet()) { + ContainerId containerId = entry.getKey(); + ProcessTreeInfo ptInfo = entry.getValue(); + Container container = context.getContainers().get(containerId); + if (container == null) { + continue; + } + try { + List<File> logDirs = ContainerLogsUtils.getContainerLogDirs( + containerId, container.getUser(), context); + long totalLogDataBytes = 0; + for (File dir : logDirs) { + long currentDirSizeBytes = FileUtil.getDU(dir); + totalLogDataBytes += currentDirSizeBytes; + String killMsg = null; + if (currentDirSizeBytes > logDirSizeLimit) { + killMsg = String.format( + "Container [pid=%s,containerID=%s] is logging beyond " + + "the container single log directory limit.%n" + + "Limit: %d Log Directory Size: %d Log Directory: %s" + + "%nKilling container.%n", + ptInfo.getPID(), containerId, logDirSizeLimit, + currentDirSizeBytes, dir); + } else if (totalLogDataBytes > logTotalSizeLimit) { + killMsg = String.format( + "Container [pid=%s,containerID=%s] is logging beyond " + + "the container total log limit.%n" + + "Limit: %d Total Size: >=%d" + + "%nKilling container.%n", + ptInfo.getPID(), containerId, logTotalSizeLimit, + totalLogDataBytes); + } + if (killMsg != null + && trackingContainers.remove(containerId) != null) { + LOG.warn(killMsg); + eventDispatcher.getEventHandler().handle( + new ContainerKillEvent(containerId, + ContainerExitStatus.KILLED_FOR_EXCESS_LOGS, killMsg)); + LOG.info("Removed ProcessTree with root " + ptInfo.getPID()); + break; + } + } + } catch (Exception e) { + LOG.warn("Uncaught exception in ContainerMemoryManager " + + "while monitoring log usage for " + containerId, e); + } + } + try { + Thread.sleep(logCheckInterval); + } catch (InterruptedException e) { + LOG.info("Log monitor thread was interrupted. 
" + + "Stopping container log monitoring."); + } + } + } + } + private void updateContainerMetrics(ContainersMonitorEvent monitoringEvent) { if (!containerMetricsEnabled || monitoringEvent == null) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java index 0dfa33faac2b3..0bae02e237de3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java index 7cbe3248923ec..38d534dbe7d26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/NECVEPlugin.java @@ -31,7 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/VEDeviceDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/VEDeviceDiscoverer.java index 105fa70666922..ca85bce1d2f3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/VEDeviceDiscoverer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/VEDeviceDiscoverer.java @@ -35,7 +35,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; class VEDeviceDiscoverer { private static final String STATE_TERMINATING = "TERMINATING"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java index 2ee44b210512b..2e13e2323949d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.com.nvidia; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java index ed80d3f80edf2..c34a55bd917ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java @@ -18,10 +18,10 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java index a99cc966e8272..aa558ea572712 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ContainerId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java index 03a22f2d74392..860037d87c5e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java index 5a70ef303a1c9..1dafd07cf4cb9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java @@ -20,7 +20,7 @@ import java.io.Serializable; -import 
com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** A class that represents an FPGA card. */ public class FpgaDevice implements Serializable { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java index 780b5e0915498..42686af09dcb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java @@ -41,9 +41,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class FpgaDiscoverer extends Configured { private static final Logger LOG = LoggerFactory.getLogger( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java index 42ee676932f40..a1413fbed6f60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.Shell; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java index 4133fb4949218..0d05c9ee4d4fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java @@ -20,10 +20,10 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourcesExceptionUtil.throwIfNecessary; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java index afb0d7eda23e5..af81709566d18 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuNodeResourceUpdateHandler.java @@ -26,12 +26,14 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.NodeResourceUpdaterPlugin; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuDeviceInformation; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI; @@ -76,4 +78,20 @@ public void updateConfiguredResource(Resource res) throws YarnException { res.setResourceValue(GPU_URI, nUsableGpus); } + + public float getNodeGpuUtilization() throws Exception{ + List<PerGpuDeviceInformation> gpuList = + gpuDiscoverer.getGpuDeviceInformation().getGpus(); + Float totalGpuUtilization = 0F; + if (gpuList != null && + gpuList.size() != 0) { + + totalGpuUtilization = gpuList + .stream() + .map(g -> g.getGpuUtilizations().getOverallGpuUtilization()) + .collect(Collectors.summingDouble(Float::floatValue)) + .floatValue() / gpuList.size(); + } + return totalGpuUtilization; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java index 
36a0d55ea7c80..00d21203e48bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java index f5844858b48bf..38a9af4f50605 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ResourceInformation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java index 3a6d29635b07e..b88f6d570b6b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java index 283f6a60ab6b5..baa55530d5cf5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.volume.csi; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.CsiAdaptorProtocol; import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeRequest; import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthCheckerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthCheckerService.java index a89fb86362bcd..bbf61de1e37b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthCheckerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthCheckerService.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.server.nodemanager.health; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.Service; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java index 1c9bd82bd46ed..af92b15e9c73c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/NodeHealthScriptRunner.java @@ -60,8 +60,9 @@ public class NodeHealthScriptRunner extends TimedHealthReporterService { "Node health script timed out"; private NodeHealthScriptRunner(String scriptName, long checkInterval, - long timeout, String[] scriptArgs) { - 
super(NodeHealthScriptRunner.class.getName(), checkInterval); + long timeout, String[] scriptArgs, boolean runBeforeStartup) { + super(NodeHealthScriptRunner.class.getName(), checkInterval, + runBeforeStartup); this.nodeHealthScript = scriptName; this.scriptTimeout = timeout; setTimerTask(new NodeHealthMonitorExecutor(scriptArgs)); @@ -91,6 +92,10 @@ public static NodeHealthScriptRunner newInstance(String scriptName, "interval-ms can not be set to a negative number."); } + boolean runBeforeStartup = conf.getBoolean( + YarnConfiguration.NM_HEALTH_CHECK_RUN_BEFORE_STARTUP, + YarnConfiguration.DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP); + // Determine time out String scriptTimeoutConfig = String.format( YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS_TEMPLATE, @@ -113,7 +118,7 @@ public static NodeHealthScriptRunner newInstance(String scriptName, String[] scriptArgs = conf.getStrings(scriptArgsConfig, new String[]{}); return new NodeHealthScriptRunner(nodeHealthScript, - checkIntervalMs, scriptTimeout, scriptArgs); + checkIntervalMs, scriptTimeout, scriptArgs, runBeforeStartup); } private enum HealthCheckerExitStatus { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java index a0c4d8b8ebd41..be4b0bf6c98b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/health/TimedHealthReporterService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.health; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.service.AbstractService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,6 +45,7 @@ public abstract class TimedHealthReporterService extends AbstractService private Timer timer; private TimerTask task; private long intervalMs; + private boolean runBeforeStartup; TimedHealthReporterService(String name, long intervalMs) { super(name); @@ -52,6 +53,17 @@ public abstract class TimedHealthReporterService extends AbstractService this.healthReport = ""; this.lastReportedTime = System.currentTimeMillis(); this.intervalMs = intervalMs; + this.runBeforeStartup = false; + } + + TimedHealthReporterService(String name, long intervalMs, + boolean runBeforeStartup) { + super(name); + this.isHealthy = true; + this.healthReport = ""; + this.lastReportedTime = System.currentTimeMillis(); + this.intervalMs = intervalMs; + this.runBeforeStartup = runBeforeStartup; } @VisibleForTesting @@ -73,7 +85,13 @@ public void serviceStart() throws Exception { throw new Exception("Health reporting task hasn't been set!"); } timer = new Timer("HealthReporterService-Timer", true); - timer.scheduleAtFixedRate(task, 0, intervalMs); + long delay = 0; + if (runBeforeStartup) { + delay = intervalMs; + task.run(); + } + + timer.scheduleAtFixedRate(task, delay, intervalMs); super.serviceStart(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java index 8ecc1a17ca727..848b9445289a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java @@ -22,6 +22,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterInt; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableGaugeFloat; @@ -29,7 +30,7 @@ import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.yarn.api.records.Resource; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Metrics(about="Metrics for node manager", context="yarn") public class NodeManagerMetrics { @@ -97,6 +98,23 @@ public class NodeManagerMetrics { MutableGaugeInt nodeUsedVMemGB; @Metric("Current CPU utilization") MutableGaugeFloat nodeCpuUtilization; + @Metric("Current GPU utilization") + MutableGaugeFloat nodeGpuUtilization; + + @Metric("Missed localization requests in bytes") + MutableCounterLong localizedCacheMissBytes; + @Metric("Cached localization requests in bytes") + MutableCounterLong localizedCacheHitBytes; + @Metric("Localization cache hit ratio (bytes)") + MutableGaugeInt localizedCacheHitBytesRatio; + @Metric("Missed localization requests (files)") + MutableCounterLong localizedCacheMissFiles; + @Metric("Cached localization requests (files)") + MutableCounterLong localizedCacheHitFiles; + @Metric("Localization cache hit ratio (files)") + MutableGaugeInt localizedCacheHitFilesRatio; + @Metric("Container localization time in milliseconds") + MutableRate localizationDurationMillis; // CHECKSTYLE:ON:VisibilityModifier @@ -411,4 +429,46 @@ public float getNodeCpuUtilization() { public void setNodeCpuUtilization(float cpuUtilization) { this.nodeCpuUtilization.set(cpuUtilization); } + + public void setNodeGpuUtilization(float nodeGpuUtilization) { + this.nodeGpuUtilization.set(nodeGpuUtilization); + } + + public float getNodeGpuUtilization() { + return nodeGpuUtilization.value(); + } + + private void updateLocalizationHitRatios() { + updateLocalizationHitRatio(localizedCacheHitBytes, localizedCacheMissBytes, + localizedCacheHitBytesRatio); + updateLocalizationHitRatio(localizedCacheHitFiles, localizedCacheMissFiles, + localizedCacheHitFilesRatio); + } + + private static void updateLocalizationHitRatio(MutableCounterLong hitCounter, + MutableCounterLong missedCounter, MutableGaugeInt ratioGauge) { + final long hits = hitCounter.value(); + final long misses = missedCounter.value(); + final long total = hits + misses; + if (total > 0) { + ratioGauge.set((int)(100 * hits / total)); + } + } + + public void localizationCacheHitMiss(long size) { + if (size > 0) { + 
localizedCacheMissBytes.incr(size); + localizedCacheMissFiles.incr(); + updateLocalizationHitRatios(); + } else if (size < 0) { + // cached: recorded negative, restore the sign + localizedCacheHitBytes.incr(-size); + localizedCacheHitFiles.incr(); + updateLocalizationHitRatios(); + } + } + + public void localizationComplete(long downloadMillis) { + localizationDurationMillis.add(downloadMillis); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java index 088c9cb26402f..702bdb33463b6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java @@ -28,7 +28,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.Collections; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.service.AbstractService; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java index ab8a8b1cd0e4f..349e9fefb7304 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.yarn.server.nodemanager.nodelabels; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.EnumUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeAttribute; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java index ab62c69c20ded..b18f8e06fd62e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.server.nodemanager.recovery; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java index b4be05ad11f64..d4cfeb7c3f0f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.scheduler; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Resource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java index 23f5fd8da3515..a5a470f6046fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; import org.apache.hadoop.yarn.server.security.MasterKeyData; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java index 5a4de1f4b6a38..a95ae6d2c46f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java @@ -67,7 +67,7 @@ import org.apache.hadoop.yarn.util.TimelineServiceHelper; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Metrics publisher service that publishes data to the timeline service v.2. It diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java index 5aa00981200bf..53f84cd51afb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java @@ -42,7 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c index 01c054a665556..ff59b96d23362 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c @@ -71,7 +71,7 @@ static void display_usage(FILE *stream) { " initialize container: %2d appid tokens nm-local-dirs " "nm-log-dirs cmd app...\n" " launch container: %2d appid containerid workdir " - "container-script tokens pidfile nm-local-dirs nm-log-dirs resources ", + "container-script tokens http-option pidfile nm-local-dirs nm-log-dirs resources ", INITIALIZE_CONTAINER, LAUNCH_CONTAINER); if(is_tc_support_enabled()) { @@ -80,10 +80,15 @@ static void display_usage(FILE *stream) { fputs("\n", stream); } + fputs( + " where http-option is one of:\n" + " --http\n" + " --https keystorepath truststorepath\n", stream); + de = is_docker_support_enabled() ? 
enabled : disabled; fprintf(stream, - "%11s launch docker container: %2d appid containerid workdir " - "container-script tokens pidfile nm-local-dirs nm-log-dirs " + "%11s launch docker container:%2d appid containerid workdir " + "container-script tokens http-option pidfile nm-local-dirs nm-log-dirs " "docker-command-file resources ", de, LAUNCH_DOCKER_CONTAINER); if(is_tc_support_enabled()) { @@ -92,6 +97,11 @@ static void display_usage(FILE *stream) { fputs("\n", stream); } + fputs( + " where http-option is one of:\n" + " --http\n" + " --https keystorepath truststorepath\n", stream); + fprintf(stream, " signal container: %2d container-pid signal\n" " delete as user: %2d relative-path\n" @@ -648,15 +658,15 @@ int main(int argc, char **argv) { assert_valid_setup(argv[0]); int operation = -1; - int ret = validate_arguments(argc, argv, &operation); + int exit_code = 0; + exit_code = validate_arguments(argc, argv, &operation); - if (ret != 0) { - flush_and_close_log_files(); - return ret; + if (exit_code != 0 || operation == -1) { + // if operation is still -1, the work was done in validate_arguments + // e.g. for --module-gpu + goto cleanup; } - int exit_code = 0; - switch (operation) { case CHECK_SETUP: //we already did this @@ -821,6 +831,7 @@ int main(int argc, char **argv) { break; } +cleanup: if (exit_code) { fprintf(ERRORFILE, "Nonzero exit code=%d, error message='%s'\n", exit_code, get_error_message(exit_code)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java index 8faa15c8f52cd..59a5a2d29f83e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java @@ -30,7 +30,7 @@ import java.util.Timer; import java.util.TimerTask; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java index b99c7d625ee7e..59a30370283de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java @@ -176,6 +176,105 @@ public void testDiskSpaceUtilizationLimit() throws IOException { dc.getGoodDirsDiskUtilizationPercentage()); } + @Test + public void testDiskSpaceUtilizationThresholdEnabled() throws IOException { + + String dirA = new File(testDir, "dirA").getPath(); + String[] dirs = {dirA}; + DirectoryCollection dc = new 
DirectoryCollection(dirs, 0.0F); + + // Disable disk utilization threshold. + dc.setDiskUtilizationThresholdEnabled(false); + Assert.assertFalse(dc.getDiskUtilizationThresholdEnabled()); + + dc.checkDirs(); + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + // Enable disk utilization threshold. + dc.setDiskUtilizationThresholdEnabled(true); + Assert.assertTrue(dc.getDiskUtilizationThresholdEnabled()); + + dc.checkDirs(); + Assert.assertEquals(0, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(1, dc.getFailedDirs().size()); + Assert.assertEquals(1, dc.getFullDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA)); + Assert.assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL, + dc.getDirectoryErrorInfo(dirA).cause); + + // no good dirs + Assert.assertEquals(0, + dc.getGoodDirsDiskUtilizationPercentage()); + + dc = new DirectoryCollection(dirs, 100.0F); + int utilizedSpacePerc = + (int) ((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 / + testDir.getTotalSpace()); + dc.checkDirs(); + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + Assert.assertEquals(utilizedSpacePerc, + dc.getGoodDirsDiskUtilizationPercentage()); + + dc = new DirectoryCollection(dirs, + testDir.getTotalSpace() / (1024 * 1024)); + + // Disable disk utilization threshold. + dc.setDiskUtilizationThresholdEnabled(false); + Assert.assertFalse(dc.getDiskUtilizationThresholdEnabled()); + + // Disable disk free space threshold. + dc.setDiskFreeSpaceThresholdEnabled(false); + Assert.assertFalse(dc.getDiskFreeSpaceThresholdEnabled()); + dc.checkDirs(); + + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + dc = new DirectoryCollection(dirs, + testDir.getTotalSpace() / (1024 * 1024)); + + // Enable disk free space threshold. 
+ dc.setDiskFreeSpaceThresholdEnabled(true); + Assert.assertTrue(dc.getDiskFreeSpaceThresholdEnabled()); + + dc.checkDirs(); + + Assert.assertEquals(0, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(1, dc.getFailedDirs().size()); + Assert.assertEquals(1, dc.getFullDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA)); + // no good dirs + Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage()); + + dc = new DirectoryCollection(dirs, 100.0F, 100.0F, 0); + utilizedSpacePerc = + (int)((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 / + testDir.getTotalSpace()); + dc.checkDirs(); + Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertEquals(0, dc.getFailedDirs().size()); + Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + + Assert.assertEquals(utilizedSpacePerc, + dc.getGoodDirsDiskUtilizationPercentage()); + } + @Test public void testDiskLimitsCutoffSetters() throws IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index b1fc2f1aa2617..3f4879b23ead7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager; +import static org.mockito.Mockito.mock; + import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -134,6 +136,9 @@ public long getRMIdentifier() { new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics, dirsHandler); nodeStatusUpdater.init(conf); + NodeResourceMonitorImpl nodeResourceMonitor = mock( + NodeResourceMonitorImpl.class); + ((NMContext) context).setNodeResourceMonitor(nodeResourceMonitor); ((NMContext)context).setContainerManager(containerManager); nodeStatusUpdater.start(); ((NMContext)context).setNodeStatusUpdater(nodeStatusUpdater); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java index e810046361de4..3ddd05a5ee828 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java @@ -29,7 +29,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ClientId; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import 
org.apache.hadoop.ipc.TestRPC.TestImpl; @@ -220,7 +220,7 @@ public TestProtos.EmptyResponseProto ping( @Test public void testNMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); - RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine2.class); // Create server side implementation MyTestRPCServer serverImpl = new MyTestRPCServer(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index c0831ee022dc9..914764132e31f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -398,12 +398,15 @@ private class MyNodeStatusUpdater4 extends NodeStatusUpdaterImpl { private final long rmStartIntervalMS; private final boolean rmNeverStart; public ResourceTracker resourceTracker; + private final boolean useSocketTimeoutEx; public MyNodeStatusUpdater4(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, - long rmStartIntervalMS, boolean rmNeverStart) { + long rmStartIntervalMS, boolean rmNeverStart, + boolean useSocketTimeoutEx) { super(context, dispatcher, healthChecker, metrics); this.rmStartIntervalMS = rmStartIntervalMS; this.rmNeverStart = rmNeverStart; + this.useSocketTimeoutEx = useSocketTimeoutEx; } @Override @@ -418,7 +421,8 @@ protected ResourceTracker getRMClient() throws IOException { HAUtil.isHAEnabled(conf)); resourceTracker = (ResourceTracker) RetryProxy.create(ResourceTracker.class, - new MyResourceTracker6(rmStartIntervalMS, rmNeverStart), + new MyResourceTracker6(rmStartIntervalMS, rmNeverStart, + useSocketTimeoutEx), retryPolicy); return resourceTracker; } @@ -721,15 +725,11 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) } else if (heartBeatID == 2 || heartBeatID == 3) { List statuses = request.getNodeStatus().getContainersStatuses(); - if (heartBeatID == 2) { - // NM should send completed containers again, since the last - // heartbeat is lost. - Assert.assertEquals(4, statuses.size()); - } else { - // NM should not send completed containers again, since the last - // heartbeat is successful. - Assert.assertEquals(2, statuses.size()); - } + // NM should send completed containers on heartbeat 2, + // since heartbeat 1 was lost. It will send them again on + // heartbeat 3, because it does not clear them if the previous + // heartbeat was lost in case the RM treated it as a duplicate. 
+ Assert.assertEquals(4, statuses.size()); Assert.assertEquals(4, context.getContainers().size()); boolean container2Exist = false, container3Exist = false, @@ -760,14 +760,8 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) container5Exist = true; } } - if (heartBeatID == 2) { - Assert.assertTrue(container2Exist && container3Exist - && container4Exist && container5Exist); - } else { - // NM do not send completed containers again - Assert.assertTrue(container2Exist && !container3Exist - && container4Exist && !container5Exist); - } + Assert.assertTrue(container2Exist && container3Exist + && container4Exist && container5Exist); if (heartBeatID == 3) { finishedContainersPulledByAM.add(containerStatus3.getContainerId()); @@ -862,11 +856,14 @@ private class MyResourceTracker6 implements ResourceTracker { private long rmStartIntervalMS; private boolean rmNeverStart; private final long waitStartTime; + private final boolean useSocketTimeoutEx; - public MyResourceTracker6(long rmStartIntervalMS, boolean rmNeverStart) { + MyResourceTracker6(long rmStartIntervalMS, boolean rmNeverStart, + boolean useSocketTimeoutEx) { this.rmStartIntervalMS = rmStartIntervalMS; this.rmNeverStart = rmNeverStart; this.waitStartTime = System.currentTimeMillis(); + this.useSocketTimeoutEx = useSocketTimeoutEx; } @Override @@ -875,8 +872,13 @@ public RegisterNodeManagerResponse registerNodeManager( IOException { if (System.currentTimeMillis() - waitStartTime <= rmStartIntervalMS || rmNeverStart) { - throw new java.net.ConnectException("Faking RM start failure as start " - + "delay timer has not expired."); + if (useSocketTimeoutEx) { + throw new java.net.SocketTimeoutException( + "Faking RM start failure as start delay timer has not expired."); + } else { + throw new java.net.ConnectException( + "Faking RM start failure as start delay timer has not expired."); + } } else { NodeId nodeId = request.getNodeId(); Resource resource = request.getResource(); @@ -931,9 +933,8 @@ public void deleteBaseDir() throws IOException { public void testRecentlyFinishedContainers() throws Exception { NodeManager nm = new NodeManager(); YarnConfiguration conf = new YarnConfiguration(); - conf.set( - NodeStatusUpdaterImpl.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS, - "10000"); + conf.setInt(NodeStatusUpdaterImpl. + YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS, 1); nm.init(conf); NodeStatusUpdaterImpl nodeStatusUpdater = (NodeStatusUpdaterImpl) nm.getNodeStatusUpdater(); @@ -948,18 +949,17 @@ public void testRecentlyFinishedContainers() throws Exception { nodeStatusUpdater.addCompletedContainer(cId); Assert.assertTrue(nodeStatusUpdater.isContainerRecentlyStopped(cId)); + // verify container remains even after expiration if app + // is still active nm.getNMContext().getContainers().remove(cId); - long time1 = System.currentTimeMillis(); - int waitInterval = 15; - while (waitInterval-- > 0 - && nodeStatusUpdater.isContainerRecentlyStopped(cId)) { - nodeStatusUpdater.removeVeryOldStoppedContainersFromCache(); - Thread.sleep(1000); - } - long time2 = System.currentTimeMillis(); - // By this time the container will be removed from cache. need to verify. 
+ Thread.sleep(10); + nodeStatusUpdater.removeVeryOldStoppedContainersFromCache(); + Assert.assertTrue(nodeStatusUpdater.isContainerRecentlyStopped(cId)); + + // complete the application and verify container is removed + nm.getNMContext().getApplications().remove(appId); + nodeStatusUpdater.removeVeryOldStoppedContainersFromCache(); Assert.assertFalse(nodeStatusUpdater.isContainerRecentlyStopped(cId)); - Assert.assertTrue((time2 - time1) >= 10000 && (time2 - time1) <= 250000); } @Test(timeout = 90000) @@ -1380,8 +1380,8 @@ protected NodeStatusUpdater createUpdater(Context context, } } - @Test (timeout = 150000) - public void testNMConnectionToRM() throws Exception { + private void testNMConnectionToRMInternal(boolean useSocketTimeoutEx) + throws Exception { final long delta = 50000; final long connectionWaitMs = 5000; final long connectionRetryIntervalMs = 1000; @@ -1400,7 +1400,7 @@ protected NodeStatusUpdater createUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { NodeStatusUpdater nodeStatusUpdater = new MyNodeStatusUpdater4( context, dispatcher, healthChecker, metrics, - rmStartIntervalMS, true); + rmStartIntervalMS, true, useSocketTimeoutEx); return nodeStatusUpdater; } }; @@ -1432,7 +1432,7 @@ protected NodeStatusUpdater createUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { NodeStatusUpdater nodeStatusUpdater = new MyNodeStatusUpdater4( context, dispatcher, healthChecker, metrics, rmStartIntervalMS, - false); + false, useSocketTimeoutEx); return nodeStatusUpdater; } }; @@ -1463,6 +1463,16 @@ protected NodeStatusUpdater createUpdater(Context context, (duration < (rmStartIntervalMS + delta))); } + @Test (timeout = 150000) + public void testNMConnectionToRM() throws Exception { + testNMConnectionToRMInternal(false); + } + + @Test (timeout = 150000) + public void testNMConnectionToRMwithSocketTimeout() throws Exception { + testNMConnectionToRMInternal(true); + } + /** * Verifies that if for some reason NM fails to start ContainerManager RPC * server, RM is oblivious to NM's presence. 
The behaviour is like this diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForAttributes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForAttributes.java index 072f4432c6277..c10680ff3cf62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForAttributes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForAttributes.java @@ -31,7 +31,7 @@ import java.util.TimerTask; import java.util.concurrent.TimeoutException; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.service.ServiceOperations; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java index b269fa45677ee..60e383870fe28 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java @@ -662,6 +662,27 @@ public void testAppRecoveryFailure() throws YarnException, Exception { Assert.assertEquals(0, state.getAppContexts().size()); } + @Test + public void testCheckIfAppExistsInStateStore() + throws IOException, YarnException { + ApplicationId appId = ApplicationId.newInstance(0, 0); + Configuration conf = createConfiguration(); + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + + createAndStartAMRMProxyService(conf); + + Assert.assertEquals(false, + getAMRMProxyService().checkIfAppExistsInStateStore(appId)); + + Configuration distConf = createConfiguration(); + conf.setBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, true); + + createAndStartAMRMProxyService(distConf); + + Assert.assertEquals(true, + getAMRMProxyService().checkIfAppExistsInStateStore(appId)); + } + /** * A mock intercepter implementation that uses the same mockRM instance across * restart. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index 7a85bfab44efc..9ee3ce6bc8b45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.doNothing; +import org.apache.hadoop.yarn.server.nodemanager.NodeResourceMonitorImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -156,32 +157,20 @@ public NMLogAggregationStatusTracker getNMLogAggregationStatusTracker() { protected NodeHealthCheckerService nodeHealthChecker; protected LocalDirsHandlerService dirsHandler; protected final long DUMMY_RM_IDENTIFIER = 1234; + private NodeResourceMonitorImpl nodeResourceMonitor = mock( + NodeResourceMonitorImpl.class); + private NodeHealthCheckerService nodeHealthCheckerService; + private NodeStatusUpdater nodeStatusUpdater; + protected ContainerManagerImpl containerManager = null; - protected NodeStatusUpdater nodeStatusUpdater = new NodeStatusUpdaterImpl( - context, new AsyncDispatcher(), null, metrics) { - @Override - protected ResourceTracker getRMClient() { - return new LocalRMInterface(); - }; - - @Override - protected void stopRMProxy() { - return; - } - - @Override - protected void startStatusUpdater() { - return; // Don't start any updating thread. - } - - @Override - public long getRMIdentifier() { - // There is no real RM registration, simulate and set RMIdentifier - return DUMMY_RM_IDENTIFIER; - } - }; + public NodeStatusUpdater getNodeStatusUpdater() { + return nodeStatusUpdater; + } - protected ContainerManagerImpl containerManager = null; + public void setNodeStatusUpdater( + NodeStatusUpdater nodeStatusUpdater) { + this.nodeStatusUpdater = nodeStatusUpdater; + } protected ContainerExecutor createContainerExecutor() { DefaultContainerExecutor exec = new DefaultContainerExecutor(); @@ -218,11 +207,36 @@ public void setup() throws IOException { delSrvc.init(conf); dirsHandler = new LocalDirsHandlerService(); - nodeHealthChecker = new NodeHealthCheckerService(dirsHandler); - nodeHealthChecker.init(conf); + dirsHandler.init(conf); + nodeHealthCheckerService = new NodeHealthCheckerService(dirsHandler); + nodeStatusUpdater = new NodeStatusUpdaterImpl( + context, new AsyncDispatcher(), nodeHealthCheckerService, metrics) { + @Override + protected ResourceTracker getRMClient() { + return new LocalRMInterface(); + }; + + @Override + protected void stopRMProxy() { + return; + } + + @Override + protected void startStatusUpdater() { + return; // Don't start any updating thread. 
+ } + + @Override + public long getRMIdentifier() { + // There is no real RM registration, simulate and set RMIdentifier + return DUMMY_RM_IDENTIFIER; + } + }; + containerManager = createContainerManager(delSrvc); ((NMContext)context).setContainerManager(containerManager); ((NMContext)context).setContainerExecutor(exec); + ((NMContext)context).setNodeResourceMonitor(nodeResourceMonitor); nodeStatusUpdater.init(conf); containerManager.init(conf); nodeStatusUpdater.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index 19bcee8152331..510af8958021b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -46,8 +46,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Charsets; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import java.io.File; import java.io.IOException; import java.net.URL; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index e215980882450..b2f183c368ae5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -18,13 +18,17 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesResponse; import org.apache.hadoop.yarn.api.records.LocalizationState; import org.apache.hadoop.yarn.api.records.LocalizationStatus; import org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -43,15 +47,18 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; +import 
java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; @@ -193,8 +200,8 @@ public int launchContainer(ContainerStartContext ctx) @Override protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) { - return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, - metrics, dirsHandler) { + return new ContainerManagerImpl(context, exec, delSrvc, + getNodeStatusUpdater(), metrics, dirsHandler) { @Override protected UserGroupInformation getRemoteUgi() throws YarnException { @@ -321,6 +328,39 @@ public void testContainerSetup() throws Exception { BufferedReader reader = new BufferedReader(new FileReader(targetFile)); Assert.assertEquals("Hello World!", reader.readLine()); Assert.assertEquals(null, reader.readLine()); + + // + // check the localization counter + // + long targetFileSize = + FileUtil.getDU(targetFile.getCanonicalFile().getParentFile()); + MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics"); + assertCounter("LocalizedCacheMissBytes", targetFileSize, rb); + assertCounter("LocalizedCacheHitBytes", 0L, rb); + assertCounter("LocalizedCacheMissFiles", 1L, rb); + assertCounter("LocalizedCacheHitFiles", 0L, rb); + assertGaugeGt("LocalizationDurationMillisAvgTime", 0, rb); + assertGauge("LocalizedCacheHitBytesRatio", 0, rb); + assertGauge("LocalizedCacheHitFilesRatio", 0, rb); + + // test cache being used + final ContainerId cid1 = createContainerId(1); + containerManager.startContainers(StartContainersRequest.newInstance( + Collections.singletonList( + StartContainerRequest.newInstance( + containerLaunchContext, + createContainerToken(cid1, DUMMY_RM_IDENTIFIER, + context.getNodeId(), + user, + context.getContainerTokenSecretManager()))))); + waitForContainerState(containerManager, cid1, ContainerState.COMPLETE); + rb = getMetrics("NodeManagerMetrics"); + assertCounter("LocalizedCacheMissBytes", targetFileSize, rb); + assertCounter("LocalizedCacheHitBytes", targetFileSize, rb); + assertCounter("LocalizedCacheMissFiles", 1L, rb); + assertCounter("LocalizedCacheHitFiles", 1L, rb); + assertGauge("LocalizedCacheHitBytesRatio", 50, rb); + assertGauge("LocalizedCacheHitFilesRatio", 50, rb); } @Test (timeout = 10000L) @@ -1704,7 +1744,7 @@ public void testStartContainerFailureWithUnknownAuxService() throws Exception { @Test public void testNullTokens() throws Exception { ContainerManagerImpl cMgrImpl = - new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, + new ContainerManagerImpl(context, exec, delSrvc, getNodeStatusUpdater(), metrics, dirsHandler); String strExceptionMsg = ""; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index 826cc02219bc2..c67ae86f95ef2 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -437,14 +437,16 @@ public void testNodeManagerMetricsRecovery() throws Exception { waitForNMContainerState(cm, cid, org.apache.hadoop.yarn.server.nodemanager .containermanager.container.ContainerState.RUNNING); - TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, 1, 1, 1, 9, 1, 7); + TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, + 1, 1, 1, 9, 1, 7, 0F); // restart and verify metrics could be recovered cm.stop(); DefaultMetricsSystem.shutdown(); metrics = NodeManagerMetrics.create(); metrics.addResource(Resource.newInstance(10240, 8)); - TestNodeManagerMetrics.checkMetrics(0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8); + TestNodeManagerMetrics.checkMetrics(0, 0, 0, 0, 0, 0, + 0, 0, 10, 0, 8, 0F); context = createContext(conf, stateStore); cm = createContainerManager(context, delSrvc); cm.init(conf); @@ -452,7 +454,8 @@ public void testNodeManagerMetricsRecovery() throws Exception { assertEquals(1, context.getApplications().size()); app = context.getApplications().get(appId); assertNotNull(app); - TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, 1, 1, 1, 9, 1, 7); + TestNodeManagerMetrics.checkMetrics(1, 0, 0, 0, 0, + 1, 1, 1, 9, 1, 7, 0F); cm.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java index 5f023f02df1d9..3498ebe40da1b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.net.SocketException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -28,6 +29,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; @@ -65,8 +67,8 @@ public void setUp() throws Exception { @Override protected ContainerManagerImpl createContainerManager(DeletionService delSrvc) { - return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, - metrics, dirsHandler) { + return new ContainerManagerImpl(context, exec, delSrvc, + getNodeStatusUpdater(), metrics, dirsHandler) { @Override public StartContainersResponse startContainers( @@ -161,15 +163,10 @@ public void testNMProxyRPCRetry() throws Exception { IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 100); // connect to some dummy address 
so that it can trigger // connection failure and RPC level retires. - newConf.set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:1234"); + newConf.set(YarnConfiguration.NM_ADDRESS, "127.0.0.1:1"); ContainerManagementProtocol proxy = getNMProxy(newConf); - try { - proxy.startContainers(allRequests); - Assert.fail("should get socket exception"); - } catch (IOException e) { - // socket exception should be thrown immediately, without RPC retries. - Assert.assertTrue(e instanceof java.net.SocketException); - } + LambdaTestUtils.intercept(SocketException.class, + () -> proxy.startContainers(allRequests)); } private ContainerManagementProtocol getNMProxy(Configuration conf) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestResourceMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestResourceMappings.java index 561ce0c018598..bfe6ab5473cef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestResourceMappings.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestResourceMappings.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.commons.io.IOUtils; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; import org.junit.Assert; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index bda78032e16c7..a9bcef77c3963 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -54,8 +54,8 @@ import java.util.jar.JarFile; import java.util.jar.Manifest; -import com.google.common.base.Supplier; -import com.google.common.collect.Lists; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -673,7 +673,7 @@ public void testPrependDistcache() throws Exception { Container container = mock(Container.class); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(containerLaunchContext); - when(container.getLocalizedResources()).thenReturn(null); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); Dispatcher dispatcher = mock(Dispatcher.class); EventHandler eventHandler = new 
EventHandler<Event>() { public void handle(Event event) { @@ -814,8 +814,6 @@ public void handle(Event event) Assert.assertTrue(userSetEnv.containsKey(testKey1)); Assert.assertTrue(userSetEnv.containsKey(testKey2)); Assert.assertTrue(userSetEnv.containsKey(testKey3)); - Assert.assertTrue(nmEnvTrack.contains("MALLOC_ARENA_MAX")); - Assert.assertTrue(nmEnvTrack.contains("MOUNT_LIST")); Assert.assertEquals(userMallocArenaMaxVal + File.pathSeparator + mallocArenaMaxVal, userSetEnv.get("MALLOC_ARENA_MAX")); Assert.assertEquals(testVal1, userSetEnv.get(testKey1)); @@ -823,6 +821,69 @@ public void handle(Event event) Assert.assertEquals(testVal3, userSetEnv.get(testKey3)); } + @Test + public void testNmForcePath() throws Exception { + // Valid only for unix + assumeNotWindows(); + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); + ApplicationId appId = ApplicationId.newInstance(0, 0); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, 1); + ContainerId cId = ContainerId.newContainerId(appAttemptId, 0); + Map<String, String> userSetEnv = new HashMap<>(); + Set<String> nmEnvTrack = new LinkedHashSet<>(); + containerLaunchContext.setEnvironment(userSetEnv); + Container container = mock(Container.class); + when(container.getContainerId()).thenReturn(cId); + when(container.getLaunchContext()).thenReturn(containerLaunchContext); + when(container.getLocalizedResources()).thenReturn(null); + Dispatcher dispatcher = mock(Dispatcher.class); + EventHandler<Event> eventHandler = new EventHandler<Event>() { + public void handle(Event event) { + Assert.assertTrue(event instanceof ContainerExitEvent); + ContainerExitEvent exitEvent = (ContainerExitEvent) event; + Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, + exitEvent.getType()); + } + }; + when(dispatcher.getEventHandler()).thenReturn(eventHandler); + + String testDir = System.getProperty("test.build.data", + "target/test-dir"); + Path pwd = new Path(testDir); + List<Path> appDirs = new ArrayList<>(); + List<String> userLocalDirs = new ArrayList<>(); + List<String> containerLogs = new ArrayList<>(); + Map<Path, List<String>> resources = new HashMap<>(); + Path nmp = new Path(testDir); + + YarnConfiguration conf = new YarnConfiguration(); + String forcePath = "./force-path"; + conf.set("yarn.nodemanager.force.path", forcePath); + + ContainerLaunch launch = new ContainerLaunch(distContext, conf, + dispatcher, exec, null, container, dirsHandler, containerManager); + launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs, + resources, nmp, nmEnvTrack); + + Assert.assertTrue(userSetEnv.containsKey(Environment.PATH.name())); + Assert.assertEquals(forcePath + ":$PATH", + userSetEnv.get(Environment.PATH.name())); + + String userPath = "/usr/bin:/usr/local/bin"; + userSetEnv.put(Environment.PATH.name(), userPath); + containerLaunchContext.setEnvironment(userSetEnv); + when(container.getLaunchContext()).thenReturn(containerLaunchContext); + + launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs, + resources, nmp, nmEnvTrack); + + Assert.assertTrue(userSetEnv.containsKey(Environment.PATH.name())); + Assert.assertEquals(forcePath + ":" + userPath, + userSetEnv.get(Environment.PATH.name())); + } + @Test public void testErrorLogOnContainerExit() throws Exception { verifyTailErrorLogOnContainerExit(new Configuration(), "/stderr", false); @@ -865,6 +926,7 @@ private void verifyTailErrorLogOnContainerExit(Configuration conf, .newContainerId(ApplicationAttemptId.newInstance(appId, 1), 1);
when(container.getContainerId()).thenReturn(containerId); when(container.getUser()).thenReturn("test"); + when(container.localizationCountersAsString()).thenReturn(""); String relativeContainerLogDir = ContainerLaunch.getRelativeContainerLogDir( appId.toString(), containerId.toString()); Path containerLogDir = @@ -1793,6 +1855,7 @@ public void testContainerLaunchOnConfigurationError() throws Exception { when(id.toString()).thenReturn("1"); when(container.getContainerId()).thenReturn(id); when(container.getUser()).thenReturn("user"); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); ContainerLaunchContext clc = mock(ContainerLaunchContext.class); when(clc.getCommands()).thenReturn(Lists.newArrayList()); when(container.getLaunchContext()).thenReturn(clc); @@ -2389,6 +2452,7 @@ public void testDistributedCacheDirs() throws Exception { .newContainerId(ApplicationAttemptId.newInstance(appId, 1), 1); when(container.getContainerId()).thenReturn(containerId); when(container.getUser()).thenReturn("test"); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); when(container.getLocalizedResources()) .thenReturn(Collections.<Path, List<String>> emptyMap()); @@ -2498,6 +2562,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { when(container.getLaunchContext()).thenReturn(clc); Credentials credentials = mock(Credentials.class); when(container.getCredentials()).thenReturn(credentials); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); doAnswer(new Answer<Void>() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { @@ -2598,4 +2663,94 @@ private String readStringFromPath(Path p) throws IOException { return new String(bytes); } } + + @Test(timeout = 20000) + public void testExpandNmAdmEnv() throws Exception { + // setup mocks + Dispatcher dispatcher = mock(Dispatcher.class); + EventHandler handler = mock(EventHandler.class); + when(dispatcher.getEventHandler()).thenReturn(handler); + ContainerExecutor containerExecutor = mock(ContainerExecutor.class); + doAnswer(new Answer<Void>() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + DataOutputStream dos = (DataOutputStream) args[0]; + dos.writeBytes("script"); + return null; + } + }).when(containerExecutor).writeLaunchEnv( + any(), any(), any(), any(), any(), any(), any()); + Application app = mock(Application.class); + ApplicationId appId = mock(ApplicationId.class); + when(appId.toString()).thenReturn("1"); + when(app.getAppId()).thenReturn(appId); + Container container = mock(Container.class); + ContainerId id = mock(ContainerId.class); + when(id.toString()).thenReturn("1"); + when(container.getContainerId()).thenReturn(id); + when(container.getUser()).thenReturn("user"); + ContainerLaunchContext clc = mock(ContainerLaunchContext.class); + when(clc.getCommands()).thenReturn(Lists.newArrayList()); + when(container.getLaunchContext()).thenReturn(clc); + Credentials credentials = mock(Credentials.class); + when(container.getCredentials()).thenReturn(credentials); + when(container.localizationCountersAsString()).thenReturn("1,2,3,4,5"); + + // Define user environment variables.
+ Map<String, String> userSetEnv = new HashMap<String, String>(); + String userVar = "USER_VAR"; + String userVarVal = "user-var-value"; + userSetEnv.put(userVar, userVarVal); + when(clc.getEnvironment()).thenReturn(userSetEnv); + + YarnConfiguration localConf = new YarnConfiguration(conf); + + // Admin Env var that depends on USER_VAR + String testKey1 = "TEST_KEY1"; + String testVal1 = "relies on {{USER_VAR}}"; + localConf.set( + YarnConfiguration.NM_ADMIN_USER_ENV + "." + testKey1, testVal1); + String testVal1Expanded; // this is what we expect after {{}} expansion + if (Shell.WINDOWS) { + testVal1Expanded = "relies on %USER_VAR%"; + } else { + testVal1Expanded = "relies on $USER_VAR"; + } + // Another Admin Env var that depends on the first one + String testKey2 = "TEST_KEY2"; + String testVal2 = "relies on {{TEST_KEY1}}"; + localConf.set( + YarnConfiguration.NM_ADMIN_USER_ENV + "." + testKey2, testVal2); + String testVal2Expanded; // this is what we expect after {{}} expansion + if (Shell.WINDOWS) { + testVal2Expanded = "relies on %TEST_KEY1%"; + } else { + testVal2Expanded = "relies on $TEST_KEY1"; + } + + // call containerLaunch + ContainerLaunch containerLaunch = new ContainerLaunch( + distContext, localConf, dispatcher, + containerExecutor, app, container, dirsHandler, containerManager); + containerLaunch.call(); + + // verify the nmPrivate paths and files + ArgumentCaptor<ContainerStartContext> cscArgument = + ArgumentCaptor.forClass(ContainerStartContext.class); + verify(containerExecutor, times(1)).launchContainer(cscArgument.capture()); + ContainerStartContext csc = cscArgument.getValue(); + Assert.assertEquals("script", + readStringFromPath(csc.getNmPrivateContainerScriptPath())); + + // verify env + ArgumentCaptor<Map> envArgument = ArgumentCaptor.forClass(Map.class); + verify(containerExecutor, times(1)).writeLaunchEnv(any(), + envArgument.capture(), any(), any(), any(), any(), any()); + Map env = envArgument.getValue(); + Assert.assertEquals(userVarVal, env.get(userVar)); + Assert.assertEquals(testVal1Expanded, env.get(testKey1)); + Assert.assertEquals(testVal2Expanded, env.get(testKey2)); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/TestFpgaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/TestFpgaResourceHandlerImpl.java index 10f6fe42a2176..189639d82b759 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/TestFpgaResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/TestFpgaResourceHandlerImpl.java @@ -73,8 +73,8 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import com.google.common.io.FileWriteMode; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.io.FileWriteMode; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; public class TestFpgaResourceHandlerImpl { @Rule diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java index a661de7dad219..26726d9f54998 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceAllocator.java @@ -58,7 +58,7 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Unit tests for GpuResourceAllocator. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java index 7871179891591..a8914c318804e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java index 016f801da73e2..8d8f748347c9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java @@ -94,7 +94,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; public class TestContainerLocalizer { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java index cb877c4e243ac..38bcb640d2c46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java @@ -161,7 +161,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestResourceLocalizationService { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index 0a2d63e08e2f5..e5ecb724f8c7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -147,7 +147,7 @@ import org.mockito.Mockito; import org.eclipse.jetty.util.MultiException; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.slf4j.LoggerFactory; public class TestLogAggregationService extends BaseContainerManagerTest { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java index 2882b3203350d..1719e1b11db4e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java @@ -21,9 +21,13 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.timeout; +import static 
org.mockito.Mockito.verify; import java.io.BufferedReader; import java.io.File; @@ -32,12 +36,14 @@ import java.io.PrintWriter; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.regex.Pattern; -import com.google.common.base.Supplier; +import java.util.function.Supplier; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; @@ -61,12 +67,19 @@ import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Event; +import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; +import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; @@ -76,6 +89,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.slf4j.LoggerFactory; @@ -88,6 +102,7 @@ public TestContainersMonitor() throws UnsupportedFileSystemException { static { LOG = LoggerFactory.getLogger(TestContainersMonitor.class); } + @Before public void setup() throws IOException { conf.setClass( @@ -353,6 +368,164 @@ public void testContainerKillOnMemoryOverflow() throws IOException, .build())); } + @SuppressWarnings("unchecked") + @Test + public void testContainerKillOnExcessLogDirectory() throws Exception { + final String user = "someuser"; + ApplicationId appId = ApplicationId.newInstance(1, 1); + ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); + ContainerId cid = ContainerId.newContainerId(attemptId, 1); + Application app = mock(Application.class); + doReturn(user).when(app).getUser(); + doReturn(appId).when(app).getAppId(); + Container container = mock(Container.class); + doReturn(cid).when(container).getContainerId(); + doReturn(user).when(container).getUser(); + File containerLogDir = new File(new File(localLogDir, appId.toString()), + cid.toString()); + containerLogDir.mkdirs(); + LocalDirsHandlerService mockDirsHandler = + mock(LocalDirsHandlerService.class); + doReturn(Collections.singletonList(localLogDir.getAbsolutePath())) + .when(mockDirsHandler).getLogDirsForRead(); + Context ctx = new NMContext(context.getContainerTokenSecretManager(), + context.getNMTokenSecretManager(), mockDirsHandler, + context.getApplicationACLsManager(), context.getNMStateStore(), + false, conf); + + 
Configuration monitorConf = new Configuration(conf); + monitorConf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false); + monitorConf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false); + monitorConf.setBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE, + false); + monitorConf.setBoolean(YarnConfiguration.NM_CONTAINER_LOG_MONITOR_ENABLED, + true); + monitorConf.setLong( + YarnConfiguration.NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES, 10); + monitorConf.setLong( + YarnConfiguration.NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES, 10000000); + monitorConf.setLong(YarnConfiguration.NM_CONTAINER_LOG_MON_INTERVAL_MS, + 10); + + EventHandler mockHandler = mock(EventHandler.class); + AsyncDispatcher mockDispatcher = mock(AsyncDispatcher.class); + doReturn(mockHandler).when(mockDispatcher).getEventHandler(); + ContainersMonitor monitor = new ContainersMonitorImpl( + mock(ContainerExecutor.class), mockDispatcher, ctx); + monitor.init(monitorConf); + monitor.start(); + Event event; + try { + ctx.getApplications().put(appId, app); + ctx.getContainers().put(cid, container); + monitor.handle(new ContainerStartMonitoringEvent(cid, 1, 1, 1, 0, 0)); + + PrintWriter fileWriter = new PrintWriter(new File(containerLogDir, + "log")); + fileWriter.write("This container is logging too much."); + fileWriter.close(); + + ArgumentCaptor captor = ArgumentCaptor.forClass(Event.class); + verify(mockHandler, timeout(10000)).handle(captor.capture()); + event = captor.getValue(); + } finally { + monitor.stop(); + } + + assertTrue("Expected a kill event", event instanceof ContainerKillEvent); + ContainerKillEvent cke = (ContainerKillEvent) event; + assertEquals("Unexpected container exit status", + ContainerExitStatus.KILLED_FOR_EXCESS_LOGS, + cke.getContainerExitStatus()); + } + + @SuppressWarnings("unchecked") + @Test + public void testContainerKillOnExcessTotalLogs() throws Exception { + final String user = "someuser"; + ApplicationId appId = ApplicationId.newInstance(1, 1); + ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); + ContainerId cid = ContainerId.newContainerId(attemptId, 1); + Application app = mock(Application.class); + doReturn(user).when(app).getUser(); + doReturn(appId).when(app).getAppId(); + Container container = mock(Container.class); + doReturn(cid).when(container).getContainerId(); + doReturn(user).when(container).getUser(); + File logDir1 = new File(localLogDir, "dir1"); + File logDir2 = new File(localLogDir, "dir2"); + List logDirs = new ArrayList<>(); + logDirs.add(logDir1.getAbsolutePath()); + logDirs.add(logDir2.getAbsolutePath()); + LocalDirsHandlerService mockDirsHandler = + mock(LocalDirsHandlerService.class); + doReturn(logDirs).when(mockDirsHandler).getLogDirsForRead(); + Context ctx = new NMContext(context.getContainerTokenSecretManager(), + context.getNMTokenSecretManager(), mockDirsHandler, + context.getApplicationACLsManager(), context.getNMStateStore(), + false, conf); + + File clogDir1 = new File(new File(logDir1, appId.toString()), + cid.toString()); + clogDir1.mkdirs(); + File clogDir2 = new File(new File(logDir2, appId.toString()), + cid.toString()); + clogDir2.mkdirs(); + + Configuration monitorConf = new Configuration(conf); + monitorConf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false); + monitorConf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false); + monitorConf.setBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE, + false); + monitorConf.setBoolean(YarnConfiguration.NM_CONTAINER_LOG_MONITOR_ENABLED, + true); + 
monitorConf.setLong( + YarnConfiguration.NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES, 100000); + monitorConf.setLong( + YarnConfiguration.NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES, 15); + monitorConf.setLong(YarnConfiguration.NM_CONTAINER_LOG_MON_INTERVAL_MS, + 10); + monitorConf.set(YarnConfiguration.NM_LOG_DIRS, logDir1.getAbsolutePath() + + "," + logDir2.getAbsolutePath()); + + EventHandler mockHandler = mock(EventHandler.class); + AsyncDispatcher mockDispatcher = mock(AsyncDispatcher.class); + doReturn(mockHandler).when(mockDispatcher).getEventHandler(); + ContainersMonitor monitor = new ContainersMonitorImpl( + mock(ContainerExecutor.class), mockDispatcher, ctx); + monitor.init(monitorConf); + monitor.start(); + Event event; + try { + ctx.getApplications().put(appId, app); + ctx.getContainers().put(cid, container); + monitor.handle(new ContainerStartMonitoringEvent(cid, 1, 1, 1, 0, 0)); + + PrintWriter fileWriter = new PrintWriter(new File(clogDir1, "log")); + fileWriter.write("0123456789"); + fileWriter.close(); + + Thread.sleep(1000); + verify(mockHandler, never()).handle(any(Event.class)); + + fileWriter = new PrintWriter(new File(clogDir2, "log")); + fileWriter.write("0123456789"); + fileWriter.close(); + + ArgumentCaptor captor = ArgumentCaptor.forClass(Event.class); + verify(mockHandler, timeout(10000)).handle(captor.capture()); + event = captor.getValue(); + } finally { + monitor.stop(); + } + + assertTrue("Expected a kill event", event instanceof ContainerKillEvent); + ContainerKillEvent cke = (ContainerKillEvent) event; + assertEquals("Unexpected container exit status", + ContainerExitStatus.KILLED_FOR_EXCESS_LOGS, + cke.getContainerExitStatus()); + } + @Test(timeout = 20000) public void testContainerMonitorMemFlags() { ContainersMonitor cm = null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java index 30f27a2d51b97..1956a4ffde860 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nec/TestVEDeviceDiscoverer.java @@ -47,7 +47,7 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Unit tests for VEDeviceDiscoverer class. 
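The two TestContainersMonitor cases above exercise a NodeManager-side container log monitor: ContainersMonitorImpl periodically sizes each container's log directories and, when either the per-directory limit or the total limit is exceeded, dispatches a ContainerKillEvent with exit status ContainerExitStatus.KILLED_FOR_EXCESS_LOGS. The sketch below shows how such a monitor might be switched on through the four YarnConfiguration constants the tests use; it is a minimal illustration, not part of this patch. The helper class name and the byte/interval values are assumptions chosen for readability (the tests deliberately use tiny limits such as 10 and 15 bytes with a 10 ms polling interval so that a single small log write trips the monitor quickly).

// Hypothetical helper, not part of this patch: enables the container log
// monitor using only the YarnConfiguration constants that appear in the
// tests above. The concrete limits and interval are illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public final class ContainerLogMonitorConfigSketch {
  private ContainerLogMonitorConfigSketch() {
  }

  public static Configuration enableLogMonitor(Configuration base) {
    Configuration conf = new Configuration(base);
    // Switch the periodic log-size check on.
    conf.setBoolean(YarnConfiguration.NM_CONTAINER_LOG_MONITOR_ENABLED, true);
    // Kill a container once any single log directory exceeds 1 GiB.
    conf.setLong(YarnConfiguration.NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES,
        1024L * 1024 * 1024);
    // Kill a container once its logs across all log dirs exceed 10 GiB.
    conf.setLong(YarnConfiguration.NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES,
        10L * 1024 * 1024 * 1024);
    // Re-check log sizes every 60 seconds.
    conf.setLong(YarnConfiguration.NM_CONTAINER_LOG_MON_INTERVAL_MS, 60_000L);
    return conf;
  }
}

The tests only assert the kill path, so the sketch mirrors that behaviour; how aggressively to limit container logs is an operator policy choice.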
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java index 5e065cb340d33..749e0cc14de91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java @@ -21,11 +21,13 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInformation; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.NMGpuResourceInfo; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuDeviceInformation; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.PerGpuUtilizations; import org.junit.Assert; import org.junit.Test; import java.util.List; @@ -122,4 +124,45 @@ public void testGetNMResourceInfoAutoDiscoveryDisabled() (NMGpuResourceInfo) target.getNMResourceInfo(); Assert.assertNull(resourceInfo.getGpuDeviceInformation()); } + + @Test + public void testNodeGPUUtilization() + throws Exception { + GpuDiscoverer gpuDiscoverer = createNodeGPUUtilizationDiscoverer(); + + GpuNodeResourceUpdateHandler gpuNodeResourceUpdateHandler = + new GpuNodeResourceUpdateHandler(gpuDiscoverer, new Configuration()); + + Assert.assertEquals(0.5F, + gpuNodeResourceUpdateHandler.getNodeGpuUtilization(), 1e-6); + } + + private GpuDiscoverer createNodeGPUUtilizationDiscoverer() + throws YarnException { + GpuDiscoverer gpuDiscoverer = mock(GpuDiscoverer.class); + + PerGpuDeviceInformation gpu1 = + new PerGpuDeviceInformation(); + PerGpuUtilizations perGpuUtilizations1 = + new PerGpuUtilizations(); + perGpuUtilizations1.setOverallGpuUtilization(0.4F); + + gpu1.setGpuUtilizations(perGpuUtilizations1); + + PerGpuDeviceInformation gpu2 = + new PerGpuDeviceInformation(); + PerGpuUtilizations perGpuUtilizations2 = + new PerGpuUtilizations(); + perGpuUtilizations2.setOverallGpuUtilization(0.6F); + gpu2.setGpuUtilizations(perGpuUtilizations2); + + List gpus = Lists.newArrayList(); + gpus.add(gpu1); + gpus.add(gpu2); + + GpuDeviceInformation gpuDeviceInfo = new GpuDeviceInformation(); + gpuDeviceInfo.setGpus(gpus); + when(gpuDiscoverer.getGpuDeviceInformation()).thenReturn(gpuDeviceInfo); + return gpuDiscoverer; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java index c8b2eaf2d3998..bd064239c456d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV1CommandPlugin.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java index b0b523360efc3..d9300c63b10eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestNvidiaDockerV2CommandPlugin.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java index 530a8e7260b87..508b8bd091505 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java @@ -28,7 +28,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import com.google.common.base.Supplier; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.UserGroupInformation; import 
org.apache.hadoop.test.GenericTestUtils; @@ -132,7 +131,7 @@ public void postTransition(ContainerImpl op, protected ContainerManagerImpl createContainerManager( DeletionService delSrvc) { return new ContainerManagerImpl(context, exec, delSrvc, - nodeStatusUpdater, metrics, dirsHandler) { + getNodeStatusUpdater(), metrics, dirsHandler) { @Override protected UserGroupInformation getRemoteUgi() throws YarnException { @@ -1291,37 +1290,24 @@ public void testContainerUpdateExecTypeGuaranteedToOpportunistic() 1, updateResponse.getSuccessfullyUpdatedContainers().size()); Assert.assertTrue(updateResponse.getFailedRequests().isEmpty()); - GetContainerStatusesRequest statRequest = - GetContainerStatusesRequest.newInstance(Collections.singletonList(cId)); - GenericTestUtils.waitFor( - new Supplier() { - @Override - public Boolean get() { - try { - List containerStatuses = containerManager - .getContainerStatuses(statRequest).getContainerStatuses(); - Assert.assertEquals(1, containerStatuses.size()); - - ContainerStatus status = containerStatuses.get(0); - Assert.assertEquals( - org.apache.hadoop.yarn.api.records.ContainerState.RUNNING, - status.getState()); - - return status.getExecutionType() == ExecutionType.OPPORTUNISTIC; - } catch (Exception ex) { - throw new RuntimeException(ex); - } - } - }, 100, 10000); - List containerStatuses = containerManager - .getContainerStatuses(statRequest).getContainerStatuses(); - Assert.assertEquals(1, containerStatuses.size()); - for (ContainerStatus status : containerStatuses) { - Assert.assertEquals( - org.apache.hadoop.yarn.api.records.ContainerState.RUNNING, - status.getState()); - Assert - .assertEquals(ExecutionType.OPPORTUNISTIC, status.getExecutionType()); - } + final GetContainerStatusesRequest statRequest = + GetContainerStatusesRequest.newInstance( + Collections.singletonList(cId)); + final org.apache.hadoop.yarn.api.records.ContainerState expectedState = + org.apache.hadoop.yarn.api.records.ContainerState.RUNNING; + + GenericTestUtils.waitFor(() -> { + List containerStatuses; + try { + containerStatuses = containerManager + .getContainerStatuses(statRequest).getContainerStatuses(); + } catch (YarnException | IOException e) { + return false; + } + Assert.assertEquals(1, containerStatuses.size()); + ContainerStatus status = containerStatuses.get(0); + return (status.getState() == expectedState + && status.getExecutionType() == ExecutionType.OPPORTUNISTIC); + }, 20, 10000); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/health/TestNodeHealthCheckerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/health/TestNodeHealthCheckerService.java index 725cc7b882178..2b40fa8d4be9b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/health/TestNodeHealthCheckerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/health/TestNodeHealthCheckerService.java @@ -28,8 +28,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Joiner; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/TestNMLogAggregationStatusTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/TestNMLogAggregationStatusTracker.java index 4efc398f547e5..f6a7cba89ef55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/TestNMLogAggregationStatusTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/TestNMLogAggregationStatusTracker.java @@ -21,7 +21,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java index c5f80ba958a52..37454747c9219 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java @@ -100,11 +100,15 @@ public void testReferenceOfSingletonJvmMetrics() { metrics.addContainerLaunchDuration(1); Assert.assertTrue(metrics.containerLaunchDuration.changed()); + // Set node gpu utilization + metrics.setNodeGpuUtilization(35.5F); + // availableGB is expected to be floored, // while allocatedGB is expected to be ceiled. 
// allocatedGB: 3.75GB allocated memory is shown as 4GB // availableGB: 4.25GB available memory is shown as 4GB - checkMetrics(10, 1, 1, 1, 1, 1, 4, 7, 4, 13, 3); + checkMetrics(10, 1, 1, 1, 1, + 1, 4, 7, 4, 13, 3, 35.5F); // Update resource and check available resource again metrics.addResource(total); @@ -116,7 +120,7 @@ public void testReferenceOfSingletonJvmMetrics() { public static void checkMetrics(int launched, int completed, int failed, int killed, int initing, int running, int allocatedGB, int allocatedContainers, int availableGB, int allocatedVCores, - int availableVCores) { + int availableVCores, Float nodeGpuUtilization) { MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics"); assertCounter("ContainersLaunched", launched, rb); assertCounter("ContainersCompleted", completed, rb); @@ -129,6 +133,7 @@ public static void checkMetrics(int launched, int completed, int failed, assertGauge("AllocatedContainers", allocatedContainers, rb); assertGauge("AvailableGB", availableGB, rb); assertGauge("AvailableVCores",availableVCores, rb); + assertGauge("NodeGpuUtilization", nodeGpuUtilization, rb); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java index 4350bc0789a80..6e07fa5034d76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java @@ -123,6 +123,11 @@ public String toString() { return ""; } + @Override + public String localizationCountersAsString() { + return ""; + } + @Override public ResourceSet getResourceSet() { return null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java index 70fe37310713e..30b73c1acc7ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index 63fdf652603e9..a3afd629443d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-resourcemanager - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN ResourceManager @@ -156,8 +156,8 @@ jetty-util-ajax - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava log4j diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java index d7b3f74322584..d6ed0d0047685 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 6ad4e3ab8dee5..62ff1fe2cfca0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -41,7 +41,7 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.ipc.StandbyException; @@ -104,7 +104,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.thirdparty.protobuf.BlockingService; public class AdminService extends CompositeService implements @@ -201,7 +201,7 @@ protected void startServer() throws Exception { if (rm.getRMContext().isHAEnabled()) { RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new 
HAServiceProtocolServerSideTranslatorPB(this); @@ -730,6 +730,14 @@ public RefreshNodesResourcesResponse refreshNodesResources( // refresh dynamic resource in ResourceTrackerService this.rm.getRMContext().getResourceTrackerService(). updateDynamicResourceConfiguration(newConf); + + // Update our heartbeat configuration as well + Configuration ysconf = + getConfiguration(new Configuration(false), + YarnConfiguration.YARN_SITE_CONFIGURATION_FILE); + this.rm.getRMContext().getResourceTrackerService() + .updateHeartBeatConfiguration(ysconf); + RMAuditLogger.logSuccess(user.getShortUserName(), operation, "AdminService"); return response; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 31456d625e84c..971b183f3d9f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -77,7 +77,7 @@ import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @SuppressWarnings("unchecked") @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index a52a78881f3ce..516093c07eec6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -198,7 +198,7 @@ import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.UTCClock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; @@ -610,6 +610,8 @@ public SubmitApplicationResponse submitApplication( throw RPCUtil.getRemoteException(ie); } + checkTags(submissionContext.getApplicationTags()); + if (timelineServiceV2Enabled) { // Sanity check for flow run String value = null; @@ -752,6 +754,31 @@ public FailApplicationAttemptResponse failApplicationAttempt( return response; } + private void checkTags(Set tags) throws YarnException { + int appMaxTags = getConfig().getInt( + YarnConfiguration.RM_APPLICATION_MAX_TAGS, + YarnConfiguration.DEFAULT_RM_APPLICATION_MAX_TAGS); + int appMaxTagLength = getConfig().getInt( + YarnConfiguration.RM_APPLICATION_MAX_TAG_LENGTH, + YarnConfiguration.DEFAULT_RM_APPLICATION_MAX_TAG_LENGTH); + if 
(tags.size() > appMaxTags) { + throw RPCUtil.getRemoteException(new IllegalArgumentException( + "Too many applicationTags, a maximum of only " + appMaxTags + + " are allowed!")); + } + for (String tag : tags) { + if (tag.length() > appMaxTagLength) { + throw RPCUtil.getRemoteException( + new IllegalArgumentException("Tag " + tag + " is too long, " + + "maximum allowed length of a tag is " + appMaxTagLength)); + } + if (!org.apache.commons.lang3.StringUtils.isAsciiPrintable(tag)) { + throw RPCUtil.getRemoteException(new IllegalArgumentException( + "A tag can only have ASCII " + "characters! Invalid tag - " + tag)); + } + } + } + @SuppressWarnings("unchecked") @Override public KillApplicationResponse forceKillApplication( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java index ba5fc4084fe52..7fe5cc9703b1b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java @@ -30,8 +30,12 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableRate; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; @InterfaceAudience.Private @Metrics(context="yarn") @@ -50,13 +54,18 @@ public class ClusterMetrics { @Metric("AM register delay") MutableRate aMRegisterDelay; @Metric("AM container allocation delay") private MutableRate aMContainerAllocationDelay; + @Metric("Memory Utilization") MutableGaugeLong utilizedMB; + @Metric("Vcore Utilization") MutableGaugeLong utilizedVirtualCores; + @Metric("Memory Capability") MutableGaugeLong capabilityMB; + @Metric("Vcore Capability") MutableGaugeLong capabilityVirtualCores; + @Metric("GPU Capability") MutableGaugeLong capabilityGPUs; private static final MetricsInfo RECORD_INFO = info("ClusterMetrics", "Metrics for the Yarn Cluster"); private static volatile ClusterMetrics INSTANCE = null; private static MetricsRegistry registry; - + public static ClusterMetrics getMetrics() { if(!isInitialized.get()){ synchronized (ClusterMetrics.class) { @@ -80,7 +89,7 @@ private static void registerMetrics() { } @VisibleForTesting - synchronized static void destroy() { + public synchronized static void destroy() { isInitialized.set(false); INSTANCE = null; } @@ -192,6 +201,48 @@ public void addAMRegisterDelay(long delay) { aMRegisterDelay.add(delay); } + public long getCapabilityMB() { + return capabilityMB.value(); + } + + public long getCapabilityVirtualCores() { + return capabilityVirtualCores.value(); + } + + public long getCapabilityGPUs() { + if (capabilityGPUs == null) { + return 0; + } + + return capabilityGPUs.value(); + 
} + + public void incrCapability(Resource res) { + if (res != null) { + capabilityMB.incr(res.getMemorySize()); + capabilityVirtualCores.incr(res.getVirtualCores()); + Integer gpuIndex = ResourceUtils.getResourceTypeIndex() + .get(ResourceInformation.GPU_URI); + if (gpuIndex != null) { + capabilityGPUs.incr(res. + getResourceValue(ResourceInformation.GPU_URI)); + } + } + } + + public void decrCapability(Resource res) { + if (res != null) { + capabilityMB.decr(res.getMemorySize()); + capabilityVirtualCores.decr(res.getVirtualCores()); + Integer gpuIndex = ResourceUtils.getResourceTypeIndex() + .get(ResourceInformation.GPU_URI); + if (gpuIndex != null) { + capabilityGPUs.decr(res. + getResourceValue(ResourceInformation.GPU_URI)); + } + } + } + public void addAMContainerAllocationDelay(long delay) { aMContainerAllocationDelay.add(delay); } @@ -199,4 +250,28 @@ public void addAMContainerAllocationDelay(long delay) { public MutableRate getAMContainerAllocationDelay() { return aMContainerAllocationDelay; } -} + + public long getUtilizedMB() { + return utilizedMB.value(); + } + + public void incrUtilizedMB(long delta) { + utilizedMB.incr(delta); + } + + public void decrUtilizedMB(long delta) { + utilizedMB.decr(delta); + } + + public void decrUtilizedVirtualCores(long delta) { + utilizedVirtualCores.decr(delta); + } + + public long getUtilizedVirtualCores() { + return utilizedVirtualCores.value(); + } + + public void incrUtilizedVirtualCores(long delta) { + utilizedVirtualCores.incr(delta); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/CuratorBasedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/CuratorBasedElectorService.java index 660b118faffea..6910b0b8f8b7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/CuratorBasedElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/CuratorBasedElectorService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.curator.framework.CuratorFramework; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DBManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DBManager.java new file mode 100644 index 0000000000000..c94dbd90b3eda --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DBManager.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager; + +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; +import org.fusesource.leveldbjni.JniDBFactory; +import org.fusesource.leveldbjni.internal.NativeDB; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBException; +import org.iq80.leveldb.Options; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.util.Timer; +import java.util.TimerTask; +import java.util.function.Consumer; + +import static org.fusesource.leveldbjni.JniDBFactory.bytes; + +public class DBManager implements Closeable { + public static final Logger LOG = + LoggerFactory.getLogger(DBManager.class); + private DB db; + private Timer compactionTimer; + + public DB initDatabase(File configurationFile, Options options, + Consumer initMethod) throws Exception { + try { + db = JniDBFactory.factory.open(configurationFile, options); + } catch (NativeDB.DBException e) { + if (e.isNotFound() || e.getMessage().contains(" does not exist ")) { + LOG.info("Creating configuration version/database at {}", + configurationFile); + options.createIfMissing(true); + try { + db = JniDBFactory.factory.open(configurationFile, options); + initMethod.accept(db); + } catch (DBException dbErr) { + throw new IOException(dbErr.getMessage(), dbErr); + } + } else { + throw e; + } + } + + return db; + } + + public void close() throws IOException { + if (compactionTimer != null) { + compactionTimer.cancel(); + compactionTimer = null; + } + if (db != null) { + db.close(); + db = null; + } + } + + public void storeVersion(String versionKey, Version versionValue) { + byte[] data = ((VersionPBImpl) versionValue).getProto().toByteArray(); + db.put(bytes(versionKey), data); + } + + public Version loadVersion(String versionKey) throws Exception { + Version version = null; + try { + byte[] data = db.get(bytes(versionKey)); + if (data != null) { + version = new VersionPBImpl(YarnServerCommonProtos.VersionProto + .parseFrom(data)); + } + } catch (DBException e) { + throw new IOException(e); + } + return version; + } + + @VisibleForTesting + public void setDb(DB db) { + this.db = db; + } + + public void startCompactionTimer(long compactionIntervalMsec, + String className) { + if (compactionIntervalMsec > 0) { + compactionTimer = new Timer( + className + " compaction timer", true); + compactionTimer.schedule(new CompactionTimerTask(), + compactionIntervalMsec, compactionIntervalMsec); + } + } + + private class CompactionTimerTask extends TimerTask { + @Override + public void run() { + long start = Time.monotonicNow(); + LOG.info("Starting full compaction cycle"); + try { + 
db.compactRange(null, null); + } catch (DBException e) { + LOG.error("Error compacting database", e); + } + long duration = Time.monotonicNow() - start; + LOG.info("Full compaction cycle completed in " + duration + " msec"); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java index 1fab075ad5c7b..f9e159168c5bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java @@ -60,7 +60,7 @@ import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @SuppressWarnings("unchecked") public class NodesListManager extends CompositeService implements diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java index f29d038e86ec8..3d7c18fc8baaa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.metrics.OpportunisticSchedulerMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.CentralizedOpportunisticContainerAllocator; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 4413e9d81c972..fe18d8252d1a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -26,6 +26,8 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import org.apache.hadoop.yarn.api.records.Container; +import 
org.apache.hadoop.yarn.api.records.NodeId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -77,8 +79,8 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Times; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.apache.hadoop.yarn.util.StringHelper; /** @@ -93,7 +95,7 @@ public class RMAppManager implements EventHandler, private int maxCompletedAppsInMemory; private int maxCompletedAppsInStateStore; protected int completedAppsInStateStore = 0; - protected LinkedList completedApps = new LinkedList<>(); + private LinkedList completedApps = new LinkedList<>(); private final RMContext rmContext; private final ApplicationMasterService masterService; @@ -190,7 +192,16 @@ public static SummaryBuilder createAppSummary(RMApp app) { RMAppAttempt attempt = app.getCurrentAppAttempt(); if (attempt != null) { trackingUrl = attempt.getTrackingUrl(); - host = attempt.getHost(); + Container masterContainer = attempt.getMasterContainer(); + if (masterContainer != null) { + NodeId nodeId = masterContainer.getNodeId(); + if (nodeId != null) { + String amHost = nodeId.getHost(); + if (amHost != null) { + host = amHost; + } + } + } } RMAppMetrics metrics = app.getRMAppMetrics(); SummaryBuilder summary = new SummaryBuilder() @@ -316,72 +327,31 @@ protected void writeAuditLog(ApplicationId appId) { * check to see if hit the limit for max # completed apps kept */ protected synchronized void checkAppNumCompletedLimit() { - if (completedAppsInStateStore > maxCompletedAppsInStateStore) { - removeCompletedAppsFromStateStore(); - } - - if (completedApps.size() > maxCompletedAppsInMemory) { - removeCompletedAppsFromMemory(); - } - } - - private void removeCompletedAppsFromStateStore() { - int numDelete = completedAppsInStateStore - maxCompletedAppsInStateStore; - for (int i = 0; i < numDelete; i++) { - ApplicationId removeId = completedApps.get(i); + // check apps kept in state store. + while (completedAppsInStateStore > this.maxCompletedAppsInStateStore) { + ApplicationId removeId = + completedApps.get(completedApps.size() - completedAppsInStateStore); RMApp removeApp = rmContext.getRMApps().get(removeId); - boolean deleteApp = shouldDeleteApp(removeApp); - - if (deleteApp) { - LOG.info("Max number of completed apps kept in state store met:" - + " maxCompletedAppsInStateStore = " - + maxCompletedAppsInStateStore + ", removing app " + removeId - + " from state store."); - rmContext.getStateStore().removeApplication(removeApp); - completedAppsInStateStore--; - } else { - LOG.info("Max number of completed apps kept in state store met:" - + " maxCompletedAppsInStateStore = " - + maxCompletedAppsInStateStore + ", but not removing app " - + removeId - + " from state store as log aggregation have not finished yet."); - } + LOG.info("Max number of completed apps kept in state store met:" + + " maxCompletedAppsInStateStore = " + maxCompletedAppsInStateStore + + ", removing app " + removeApp.getApplicationId() + + " from state store."); + rmContext.getStateStore().removeApplication(removeApp); + completedAppsInStateStore--; + } + + // check apps kept in memory. 
+ while (completedApps.size() > this.maxCompletedAppsInMemory) { + ApplicationId removeId = completedApps.remove(); + LOG.info("Application should be expired, max number of completed apps" + + " kept in memory met: maxCompletedAppsInMemory = " + + this.maxCompletedAppsInMemory + ", removing app " + removeId + + " from memory: "); + rmContext.getRMApps().remove(removeId); + this.applicationACLsManager.removeApplication(removeId); } } - private void removeCompletedAppsFromMemory() { - int numDelete = completedApps.size() - maxCompletedAppsInMemory; - int offset = 0; - for (int i = 0; i < numDelete; i++) { - int deletionIdx = i - offset; - ApplicationId removeId = completedApps.get(deletionIdx); - RMApp removeApp = rmContext.getRMApps().get(removeId); - boolean deleteApp = shouldDeleteApp(removeApp); - - if (deleteApp) { - ++offset; - LOG.info("Application should be expired, max number of completed apps" - + " kept in memory met: maxCompletedAppsInMemory = " - + this.maxCompletedAppsInMemory + ", removing app " + removeId - + " from memory: "); - completedApps.remove(deletionIdx); - rmContext.getRMApps().remove(removeId); - this.applicationACLsManager.removeApplication(removeId); - } else { - LOG.info("Application should be expired, max number of completed apps" - + " kept in memory met: maxCompletedAppsInMemory = " - + this.maxCompletedAppsInMemory + ", but not removing app " - + removeId - + " from memory as log aggregation have not finished yet."); - } - } - } - - private boolean shouldDeleteApp(RMApp app) { - return !app.isLogAggregationEnabled() - || app.isLogAggregationFinished(); - } - @SuppressWarnings("unchecked") protected void submitApplication( ApplicationSubmissionContext submissionContext, long submitTime, @@ -957,6 +927,10 @@ protected String getUserNameForPlacement(final String user, return usernameUsedForPlacement; } String queue = appPlacementContext.getQueue(); + String parent = appPlacementContext.getParentQueue(); + if (scheduler instanceof CapacityScheduler && parent != null) { + queue = parent + "." 
+ queue; + } if (callerUGI != null && scheduler .checkAccess(callerUGI, QueueACL.SUBMIT_APPLICATIONS, queue)) { usernameUsedForPlacement = userNameFromAppTag; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 54e0281f7e890..7f10138494e50 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -66,7 +66,7 @@ import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.util.Clock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java index f7fb7e631e888..a551450512801 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.conf.YarnConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 990514e2e0f50..880201f87e9d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -30,7 +30,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index e71d3c78d4f03..79212e1fbce1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.sun.jersey.spi.container.servlet.ServletContainer; import org.slf4j.Logger; @@ -613,12 +613,20 @@ protected SystemMetricsPublisher createSystemMetricsPublisher() { // sanity check for configurations protected static void validateConfigs(Configuration conf) { // validate max-attempts - int globalMaxAppAttempts = - conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + int rmMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); + if (rmMaxAppAttempts <= 0) { + throw new YarnRuntimeException("Invalid rm am max attempts configuration" + + ", " + YarnConfiguration.RM_AM_MAX_ATTEMPTS + + "=" + rmMaxAppAttempts + ", it should be a positive integer."); + } + int globalMaxAppAttempts = conf.getInt( + YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS, + conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS)); if (globalMaxAppAttempts <= 0) { throw new YarnRuntimeException("Invalid global max attempts configuration" - + ", " + YarnConfiguration.RM_AM_MAX_ATTEMPTS + + ", " + YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS + "=" + globalMaxAppAttempts + ", it should be a positive integer."); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 2c89ddd9e9bd3..1f79e2b7d306b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -32,7 +32,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import org.apache.commons.collections.CollectionUtils; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -55,7 +55,6 @@ import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -93,7 +92,7 @@ import 
org.apache.hadoop.yarn.util.RackResolver; import org.apache.hadoop.yarn.util.YarnVersionInfo; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class ResourceTrackerService extends AbstractService implements ResourceTracker { @@ -114,6 +113,13 @@ public class ResourceTrackerService extends AbstractService implements private final WriteLock writeLock; private long nextHeartBeatInterval; + private boolean heartBeatIntervalScalingEnable; + private long heartBeatIntervalMin; + private long heartBeatIntervalMax; + private float heartBeatIntervalSpeedupFactor; + private float heartBeatIntervalSlowdownFactor; + + private Server server; private InetSocketAddress resourceTrackerAddress; private String minimumNodeManagerVersion; @@ -157,14 +163,6 @@ protected void serviceInit(Configuration conf) throws Exception { YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); RackResolver.init(conf); - nextHeartBeatInterval = - conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, - YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS); - if (nextHeartBeatInterval <= 0) { - throw new YarnRuntimeException("Invalid Configuration. " - + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS - + " should be larger than 0."); - } checkIpHostnameInRegistration = conf.getBoolean( YarnConfiguration.RM_NM_REGISTRATION_IP_HOSTNAME_CHECK_KEY, @@ -188,7 +186,7 @@ protected void serviceInit(Configuration conf) throws Exception { isDelegatedCentralizedNodeLabelsConf = YarnConfiguration.isDelegatedCentralizedNodeLabelConfiguration(conf); } - + updateHeartBeatConfiguration(conf); loadDynamicResourceConfiguration(conf); decommissioningWatcher.init(conf); super.serviceInit(conf); @@ -233,6 +231,84 @@ public void updateDynamicResourceConfiguration( } } + /** + * Update HearBeatConfiguration with new configuration. + * @param conf Yarn Configuration + */ + public void updateHeartBeatConfiguration(Configuration conf) { + this.writeLock.lock(); + try { + nextHeartBeatInterval = + conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS); + heartBeatIntervalScalingEnable = + conf.getBoolean( + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_SCALING_ENABLE, + YarnConfiguration. + DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SCALING_ENABLE); + heartBeatIntervalMin = + conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MIN_MS, + YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MIN_MS); + heartBeatIntervalMax = + conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MAX_MS, + YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MAX_MS); + heartBeatIntervalSpeedupFactor = + conf.getFloat( + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR, + YarnConfiguration. + DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR); + heartBeatIntervalSlowdownFactor = + conf.getFloat( + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR, + YarnConfiguration. 
+ DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR); + + if (nextHeartBeatInterval <= 0) { + LOG.warn("HeartBeat interval: " + nextHeartBeatInterval + + " must be greater than 0, using default."); + nextHeartBeatInterval = + YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS; + } + + if (heartBeatIntervalScalingEnable) { + if (heartBeatIntervalMin <= 0 + || heartBeatIntervalMin > heartBeatIntervalMax + || nextHeartBeatInterval < heartBeatIntervalMin + || nextHeartBeatInterval > heartBeatIntervalMax) { + LOG.warn("Invalid NM Heartbeat Configuration. " + + "Required: 0 < minimum <= interval <= maximum. Got: 0 < " + + heartBeatIntervalMin + " <= " + + nextHeartBeatInterval + " <= " + + heartBeatIntervalMax + + " Setting min and max to configured interval."); + heartBeatIntervalMin = nextHeartBeatInterval; + heartBeatIntervalMax = nextHeartBeatInterval; + } + if (heartBeatIntervalSpeedupFactor < 0 + || heartBeatIntervalSlowdownFactor < 0) { + LOG.warn( + "Heartbeat scaling factors must be >= 0 " + + " SpeedupFactor:" + heartBeatIntervalSpeedupFactor + + " SlowdownFactor:" + heartBeatIntervalSlowdownFactor + + ". Using Defaults"); + heartBeatIntervalSlowdownFactor = + YarnConfiguration. + DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR; + heartBeatIntervalSpeedupFactor = + YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR; + } + LOG.info("Heartbeat Scaling Configuration: " + + " defaultInterval:" + nextHeartBeatInterval + + " minimumInterval:" + heartBeatIntervalMin + + " maximumInterval:" + heartBeatIntervalMax + + " speedupFactor:" + heartBeatIntervalSpeedupFactor + + " slowdownFactor:" + heartBeatIntervalSlowdownFactor); + } + } finally { + this.writeLock.unlock(); + } + } + @Override protected void serviceStart() throws Exception { super.serviceStart(); @@ -335,6 +411,7 @@ public RegisterNodeManagerResponse registerNodeManager( Resource capability = request.getResource(); String nodeManagerVersion = request.getNMVersion(); Resource physicalResource = request.getPhysicalResource(); + NodeStatus nodeStatus = request.getNodeStatus(); RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); @@ -426,7 +503,7 @@ public RegisterNodeManagerResponse registerNodeManager( if (oldNode == null) { RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId, request.getNMContainerStatuses(), - request.getRunningApplications()); + request.getRunningApplications(), nodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { if (LOG.isDebugEnabled()) { @@ -462,7 +539,7 @@ public RegisterNodeManagerResponse registerNodeManager( this.rmContext.getRMNodes().put(nodeId, rmNode); this.rmContext.getDispatcher().getEventHandler() - .handle(new RMNodeStartedEvent(nodeId, null, null)); + .handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus)); } else { // Reset heartbeat ID since node just restarted. 
oldNode.resetLastNodeHeartBeatResponse(); @@ -628,10 +705,17 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) } // Heartbeat response + long newInterval = nextHeartBeatInterval; + if (heartBeatIntervalScalingEnable) { + newInterval = rmNode.calculateHeartBeatInterval( + nextHeartBeatInterval, heartBeatIntervalMin, + heartBeatIntervalMax, heartBeatIntervalSpeedupFactor, + heartBeatIntervalSlowdownFactor); + } NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse( getNextResponseId(lastNodeHeartbeatResponse.getResponseId()), - NodeAction.NORMAL, null, null, null, null, nextHeartBeatInterval); + NodeAction.NORMAL, null, null, null, null, newInterval); rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse); populateKeys(request, nodeHeartBeatResponse); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java index d1639655600e7..79b048b7e1d0e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/RMApplicationHistoryWriter.java @@ -52,7 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** *

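Note on the ResourceTrackerService changes above: they replace the single static NM heartbeat interval with a configurable range. When interval scaling is enabled, the interval returned in each NodeHeartbeatResponse is computed per node via RMNode#calculateHeartBeatInterval and kept within the configured minimum and maximum. The body of that method is not part of this hunk, so the sketch below only illustrates the kind of load-based scaling the new speedup/slowdown factors suggest; the class name, the scaledInterval helper and the clusterLoadFactor signal are assumptions for illustration, not code from the patch.

// Illustrative sketch only: the real logic lives in RMNode#calculateHeartBeatInterval,
// whose body is not shown in this patch. The load-based scaling below is an assumption.
public final class HeartbeatIntervalSketch {

  private HeartbeatIntervalSketch() {
  }

  /**
   * Picks a heartbeat interval given the default interval and the configured
   * bounds. "clusterLoadFactor" is a hypothetical signal in [0, 1]: a busy
   * cluster slows heartbeats down, an idle one speeds them up.
   */
  static long scaledInterval(long defaultIntervalMs, long minMs, long maxMs,
      float speedupFactor, float slowdownFactor, float clusterLoadFactor) {
    long candidate;
    if (clusterLoadFactor > 0.5f) {
      // Busy: stretch the interval using the slowdown factor.
      candidate = (long) (defaultIntervalMs * (1 + slowdownFactor * clusterLoadFactor));
    } else {
      // Idle: shrink the interval using the speedup factor.
      candidate = (long) (defaultIntervalMs / (1 + speedupFactor * (1 - clusterLoadFactor)));
    }
    // Always honour the configured bounds, mirroring the validation done in
    // updateHeartBeatConfiguration above.
    return Math.max(minMs, Math.min(maxMs, candidate));
  }

  public static void main(String[] args) {
    // With a 1000 ms default and bounds [1000, 10000], a heavily loaded
    // cluster backs off while an idle one stays at the floor.
    System.out.println(scaledInterval(1000L, 1000L, 10000L, 1.0f, 1.0f, 0.9f));
    System.out.println(scaledInterval(1000L, 1000L, 10000L, 1.0f, 1.0f, 0.1f));
  }
}
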
    diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java index c8595fdba9b7a..c9a2b725928f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java @@ -68,7 +68,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The launch of the AM itself. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index 55c35e3a9e0d7..9f4de2868a1fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -23,7 +23,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java index 530184fe1b9d1..5407f52d775eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java @@ -68,7 +68,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Implements {@link FederationStateStore} and provides a service for diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java index e84d585550891..d3591730279cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java @@ -62,7 +62,7 @@ import org.apache.hadoop.yarn.util.TimelineServiceHelper; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is responsible for posting application, appattempt & Container diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index 32030709f350c..a6dfd7c28354b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -29,7 +29,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class SchedulingMonitor extends AbstractService { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java index adb7835df7c00..5418e8b1f8b1f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitorManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java index 7985296fcafa2..7f4fd91be4398 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java @@ -74,4 +74,8 @@ TempQueuePerPartition getQueueByPartition(String queueName, @Unstable IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy(); + + boolean getCrossQueuePreemptionConservativeDRF(); + + boolean getInQueuePreemptionConservativeDRF(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java index 28a74498af9a4..d9e9091bc86f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java @@ -116,7 +116,9 @@ public Map> selectCandidates( .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedCandidates, curCandidates, - totalPreemptionAllowed, false); + totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF() + ); if (!preempted) { continue; } @@ -193,7 +195,8 @@ private void preemptAMContainers(Resource clusterResource, boolean preempted = CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, preemptMap, - curCandidates, totalPreemptionAllowed, false); + curCandidates, totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF()); if (preempted) { Resources.subtractFrom(skippedAMSize, c.getAllocatedResource()); } @@ -229,7 +232,8 @@ private void preemptFrom(FiCaSchedulerApp app, CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedContainers, - curCandidates, totalPreemptionAllowed, false); + curCandidates, totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF()); if (!preemptionContext.isObserveOnly()) { preemptionContext.getRMContext().getDispatcher().getEventHandler() @@ -273,7 +277,8 @@ private void preemptFrom(FiCaSchedulerApp app, CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedContainers, - curCandidates, 
totalPreemptionAllowed, false); + curCandidates, totalPreemptionAllowed, + preemptionContext.getCrossQueuePreemptionConservativeDRF()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java index 8a1b47b5dee3b..cea1bca773690 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java @@ -263,7 +263,8 @@ private void preemptFromLeastStarvedApp(LeafQueue leafQueue, boolean ret = CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedCandidates, - curCandidates, totalPreemptedResourceAllowed, true); + curCandidates, totalPreemptedResourceAllowed, + preemptionContext.getInQueuePreemptionConservativeDRF()); // Subtract from respective user's resource usage once a container is // selected for preemption. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java index 3c97364ec0250..ae4ca5c60a5a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java @@ -23,7 +23,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.util.Collections; import java.util.Comparator; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index 915018de22132..3ff1a052011f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -112,6 +112,9 @@ public enum IntraQueuePreemptionOrderPolicy { private float minimumThresholdForIntraQueuePreemption; private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy; + private boolean crossQueuePreemptionConservativeDRF; + private boolean inQueuePreemptionConservativeDRF; + // Current configuration private CapacitySchedulerConfiguration csConfig; @@ -224,6 +227,18 @@ private void updateConfigIfNeeded() { CapacitySchedulerConfiguration.DEFAULT_INTRAQUEUE_PREEMPTION_ORDER_POLICY) .toUpperCase()); + crossQueuePreemptionConservativeDRF = config.getBoolean( + CapacitySchedulerConfiguration. + CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + CapacitySchedulerConfiguration. + DEFAULT_CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF); + + inQueuePreemptionConservativeDRF = config.getBoolean( + CapacitySchedulerConfiguration. + IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + CapacitySchedulerConfiguration. + DEFAULT_IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF); + candidatesSelectionPolicies = new ArrayList<>(); // Do we need white queue-priority preemption policy? @@ -299,7 +314,12 @@ private void updateConfigIfNeeded() { selectCandidatesForResevedContainers + "\n" + "additional_res_balance_based_on_reserved_containers = " + additionalPreemptionBasedOnReservedResource + "\n" + - "Preemption-to-balance-queue-enabled = " + isPreemptionToBalanceRequired); + "Preemption-to-balance-queue-enabled = " + + isPreemptionToBalanceRequired + "\n" + + "cross-queue-preemption.conservative-drf = " + + crossQueuePreemptionConservativeDRF + "\n" + + "in-queue-preemption.conservative-drf = " + + inQueuePreemptionConservativeDRF); csConfig = config; } @@ -422,7 +442,7 @@ private Set getLeafQueueNames(TempQueuePerPartition q) { return leafQueueNames; } - + /** * This method selects and tracks containers to be preemptionCandidates. If a container * is in the target list for more than maxWaitTime it is killed. 
@@ -781,6 +801,16 @@ public IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy() { return intraQueuePreemptionOrderPolicy; } + @Override + public boolean getCrossQueuePreemptionConservativeDRF() { + return crossQueuePreemptionConservativeDRF; + } + + @Override + public boolean getInQueuePreemptionConservativeDRF() { + return inQueuePreemptionConservativeDRF; + } + @Override public long getDefaultMaximumKillWaitTimeout() { return maxWaitTime; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java index fd1e9c51d9193..402f9a8b42320 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; -import com.google.common.collect.HashBasedTable; -import com.google.common.collect.Table; +import org.apache.hadoop.thirdparty.com.google.common.collect.HashBasedTable; +import org.apache.hadoop.thirdparty.com.google.common.collect.Table; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java index 51a7fb0246493..2ea44d2aa67fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.monitor.invariants; -import com.google.common.base.Charsets; -import com.google.common.io.Files; +import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsRecord; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java index d3edba4d5921c..d2a51be850730 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/NodeAttributesManagerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.nodelabels; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -35,7 +35,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,7 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAttributesUpdateSchedulerEvent; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; /** * Manager holding the attributes to Labels. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java index 9ffeacfaa3674..9bd58f13c4055 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java @@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Update nodes labels map for ResourceManager periodically. 
It collects diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java index 370a2f0633dc1..1a08e03e699cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeLabelsUpdateSchedulerEvent; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class RMNodeLabelsManager extends CommonNodeLabelsManager { protected static class Queue { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java index c8a29b46e7d95..63d98ba6c4032 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java @@ -48,7 +48,7 @@ public class AppNameMappingPlacementRule extends PlacementRule { private static final String QUEUE_MAPPING_NAME = "app-name"; private boolean overrideWithQueueMappings = false; - private List mappings = null; + private List mappings = null; protected CapacitySchedulerQueueManager queueManager; public AppNameMappingPlacementRule() { @@ -56,7 +56,7 @@ public AppNameMappingPlacementRule() { } public AppNameMappingPlacementRule(boolean overrideWithQueueMappings, - List newMappings) { + List newMappings) { this.overrideWithQueueMappings = overrideWithQueueMappings; this.mappings = newMappings; } @@ -76,18 +76,16 @@ public boolean initialize(ResourceScheduler scheduler) LOG.info( "Initialized App Name queue mappings, override: " + overrideWithQueueMappings); - List queueMappings = + List queueMappings = conf.getQueueMappingEntity(QUEUE_MAPPING_NAME); // Get new user mappings - List newMappings = new ArrayList<>(); + List newMappings = new ArrayList<>(); queueManager = schedulerContext.getCapacitySchedulerQueueManager(); // check if mappings refer to valid queues - for (QueueMappingEntity mapping : queueMappings) { - QueuePath queuePath = mapping.getQueuePath(); - + for (QueueMapping mapping : queueMappings) { if (isStaticQueueMapping(mapping)) { //at this point mapping.getQueueName() return only the queue name, since //the config parsing have been changed making QueueMapping more @@ -98,7 +96,7 @@ public boolean initialize(ResourceScheduler scheduler) //Try getting queue by its full 
path name, if it exists it is a static //leaf queue indeed, without any auto creation magic - if (queueManager.isAmbiguous(queuePath.getFullPath())) { + if (queueManager.isAmbiguous(mapping.getFullPath())) { throw new IOException( "mapping contains ambiguous leaf queue reference " + mapping .getFullPath()); @@ -109,9 +107,8 @@ public boolean initialize(ResourceScheduler scheduler) //validate if parent queue is specified, // then it should exist and // be an instance of AutoCreateEnabledParentQueue - QueueMappingEntity newMapping = - validateAndGetAutoCreatedQueueMapping(queueManager, mapping, - queuePath); + QueueMapping newMapping = + validateAndGetAutoCreatedQueueMapping(queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " + mapping @@ -123,8 +120,8 @@ public boolean initialize(ResourceScheduler scheduler) // if its an instance of leaf queue // if its an instance of auto created leaf queue, // then extract parent queue name and update queue mapping - QueueMappingEntity newMapping = validateAndGetQueueMapping( - queueManager, queue, mapping, queuePath); + QueueMapping newMapping = validateAndGetQueueMapping( + queueManager, queue, mapping); newMappings.add(newMapping); } } else { @@ -134,8 +131,8 @@ public boolean initialize(ResourceScheduler scheduler) // if parent queue is specified, then // parent queue exists and an instance of AutoCreateEnabledParentQueue // - QueueMappingEntity newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( + queueManager, mapping); if (newMapping != null) { newMappings.add(newMapping); } else{ @@ -160,7 +157,7 @@ private static boolean ifQueueDoesNotExist(CSQueue queue) { private ApplicationPlacementContext getAppPlacementContext(String user, String applicationName) throws IOException { - for (QueueMappingEntity mapping : mappings) { + for (QueueMapping mapping : mappings) { if (mapping.getSource().equals(CURRENT_APP_MAPPING)) { if (mapping.getQueue().equals(CURRENT_APP_MAPPING)) { return getPlacementContext(mapping, applicationName, queueManager); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/DefaultPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/DefaultPlacementRule.java index cd6551a822f5f..89ec6166254ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/DefaultPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/DefaultPlacementRule.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/FSPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/FSPlacementRule.java index 7ceb3745d53ec..474660e9f5d2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/FSPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/FSPlacementRule.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java index 6a6c3b93264e0..4e9195d15b1f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java @@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.exceptions.YarnException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class PlacementManager { private static final Logger LOG = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java index c3b3cc62a487a..b142dd6c1008a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMapping.java @@ -66,10 +66,20 @@ public QueueMappingBuilder parentQueue(String mappingParentQueue) { return this; } - public QueueMappingBuilder queuePath(QueuePath path) { - this.queue = path.getLeafQueue(); - this.parentQueue = path.getParentQueue(); - return this; + public QueueMappingBuilder parsePathString(String queuePath) { + int parentQueueNameEndIndex = queuePath.lastIndexOf(DOT); + + if (parentQueueNameEndIndex > -1) { + final String parentQueue = + 
queuePath.substring(0, parentQueueNameEndIndex).trim(); + final String leafQueue = + queuePath.substring(parentQueueNameEndIndex + 1).trim(); + return this + .parentQueue(parentQueue) + .queue(leafQueue); + } + + return this.queue(queuePath); } public QueueMapping build() { @@ -82,6 +92,7 @@ private QueueMapping(QueueMappingBuilder builder) { this.source = builder.source; this.queue = builder.queue; this.parentQueue = builder.parentQueue; + this.fullPath = (parentQueue != null) ? (parentQueue + DOT + queue) : queue; } /** @@ -89,8 +100,9 @@ private QueueMapping(QueueMappingBuilder builder) { * */ public enum MappingType { - - USER("u"), GROUP("g"); + USER("u"), + GROUP("g"), + APPLICATION("a"); private final String type; @@ -108,6 +120,7 @@ public String toString() { private String source; private String queue; private String parentQueue; + private String fullPath; private final static String DELIMITER = ":"; @@ -132,13 +145,7 @@ public String getSource() { } public String getFullPath() { - return (parentQueue != null ? parentQueue + DOT + queue : queue); - } - - public QueuePath getQueuePath() { - //This is to make sure the parsing is the same everywhere, but the - //whole parsing part should be moved to QueuePathConstructor - return QueuePlacementRuleUtils.extractQueuePath(getFullPath()); + return fullPath; } @Override @@ -197,4 +204,10 @@ public String toString() { return type.toString() + DELIMITER + source + DELIMITER + (parentQueue != null ? parentQueue + "." + queue : queue); } + + public String toTypelessString() { + return source + DELIMITER + + (parentQueue != null ? parentQueue + "." + queue : queue); + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java deleted file mode 100644 index ccb611a6c1ae7..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueueMappingEntity.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.yarn.server.resourcemanager.placement; - -import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; - -public class QueueMappingEntity { - private String source; - private String queue; - private String parentQueue; - private String fullPath; - - public final static String DELIMITER = ":"; - - public QueueMappingEntity(String source, String queue) { - this.source = source; - this.queue = queue; - this.parentQueue = null; - this.fullPath = queue; - } - public QueueMappingEntity(String source, String queue, String parentQueue) { - this.source = source; - this.queue = queue; - this.parentQueue = parentQueue; - this.fullPath = parentQueue + DOT + queue; - } - - public QueueMappingEntity(String source, QueuePath path) { - this.source = source; - this.queue = path.getLeafQueue(); - this.parentQueue = path.getParentQueue(); - this.fullPath = parentQueue + DOT + queue; - } - - public String getQueue() { - return queue; - } - - public String getParentQueue() { - return parentQueue; - } - - public String getFullPath() { - return fullPath; - } - - public String getSource() { - return source; - } - - public boolean hasParentQueue() { - return parentQueue != null; - } - - public QueuePath getQueuePath() { - //This is to make sure the parsing is the same everywhere, but the - //whole parsing part should be moved to QueuePathConstructor - return QueuePlacementRuleUtils.extractQueuePath(getFullPath()); - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof QueueMappingEntity) { - QueueMappingEntity other = (QueueMappingEntity) obj; - return (other.source.equals(source) && - other.queue.equals(queue)); - } else { - return false; - } - } - - public String toString() { - return source + DELIMITER + (parentQueue != null ? - parentQueue + DOT + queue : - queue); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java deleted file mode 100644 index e02cf58145b89..0000000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePath.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.yarn.server.resourcemanager.placement; - -import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; - -public class QueuePath { - - private String parentQueue; - private String leafQueue; - private String fullPath; - - public QueuePath(final String leafQueue) { - //if the queue does not have a parent, the full path == leaf queue name - this.leafQueue = leafQueue; - this.fullPath = leafQueue; - } - - public QueuePath(final String parentQueue, final String leafQueue) { - this.parentQueue = parentQueue; - this.leafQueue = leafQueue; - this.fullPath = parentQueue + DOT + leafQueue; - } - - public String getParentQueue() { - return parentQueue; - } - - public String getLeafQueue() { - return leafQueue; - } - - public boolean hasParentQueue() { - return parentQueue != null; - } - - public String getFullPath() { - return fullPath; - } - - @Override - public String toString() { - return fullPath; - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java index b9b35c1007d2d..76e3e275fc9f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/QueuePlacementRuleUtils.java @@ -65,25 +65,24 @@ public static void validateQueueMappingUnderParentQueue( } } - public static QueueMappingEntity validateAndGetAutoCreatedQueueMapping( - CapacitySchedulerQueueManager queueManager, QueueMappingEntity mapping, - QueuePath queuePath) throws IOException { - if (queuePath.hasParentQueue()) { + public static QueueMapping validateAndGetAutoCreatedQueueMapping( + CapacitySchedulerQueueManager queueManager, QueueMapping mapping) + throws IOException { + if (mapping.hasParentQueue()) { //if parent queue is specified, // then it should exist and be an instance of ManagedParentQueue validateQueueMappingUnderParentQueue(queueManager.getQueue( - queuePath.getParentQueue()), queuePath.getParentQueue(), - queuePath.getFullPath()); - return new QueueMappingEntity(mapping.getSource(), - queuePath.getFullPath(), queuePath.getParentQueue()); + mapping.getParentQueue()), mapping.getParentQueue(), + mapping.getFullPath()); + return mapping; } return null; } - public static QueueMappingEntity validateAndGetQueueMapping( + public static QueueMapping validateAndGetQueueMapping( CapacitySchedulerQueueManager queueManager, CSQueue queue, - QueueMappingEntity mapping, QueuePath queuePath) throws IOException { + QueueMapping mapping) throws IOException { if (!(queue instanceof LeafQueue)) { throw new IOException( "mapping contains invalid or non-leaf queue : " + @@ -93,8 +92,8 @@ public static QueueMappingEntity validateAndGetQueueMapping( if (queue instanceof AutoCreatedLeafQueue && queue .getParent() instanceof ManagedParentQueue) { - QueueMappingEntity newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( + queueManager, 
mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " + @@ -105,34 +104,20 @@ public static QueueMappingEntity validateAndGetQueueMapping( return mapping; } - public static boolean isStaticQueueMapping(QueueMappingEntity mapping) { + public static boolean isStaticQueueMapping(QueueMapping mapping) { return !mapping.getQueue().contains(CURRENT_USER_MAPPING) && !mapping .getQueue().contains(PRIMARY_GROUP_MAPPING) && !mapping.getQueue().contains(SECONDARY_GROUP_MAPPING); } - public static QueuePath extractQueuePath(String queuePath) { - int parentQueueNameEndIndex = queuePath.lastIndexOf(DOT); - - if (parentQueueNameEndIndex > -1) { - final String parentQueue = queuePath.substring(0, parentQueueNameEndIndex) - .trim(); - final String leafQueue = queuePath.substring(parentQueueNameEndIndex + 1) - .trim(); - return new QueuePath(parentQueue, leafQueue); - } - - return new QueuePath(queuePath); - } - public static ApplicationPlacementContext getPlacementContext( - QueueMappingEntity mapping, CapacitySchedulerQueueManager queueManager) + QueueMapping mapping, CapacitySchedulerQueueManager queueManager) throws IOException { return getPlacementContext(mapping, mapping.getQueue(), queueManager); } public static ApplicationPlacementContext getPlacementContext( - QueueMappingEntity mapping, String leafQueueName, + QueueMapping mapping, String leafQueueName, CapacitySchedulerQueueManager queueManager) throws IOException { //leafQueue name no longer identifies a queue uniquely checking ambiguity diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java index 4f975821a3c34..8eb912bca7c72 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java @@ -44,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class UserGroupMappingPlacementRule extends PlacementRule { private static final Logger LOG = LoggerFactory @@ -105,35 +105,76 @@ private ApplicationPlacementContext getPlacementForUser(String user) if (mapping.getParentQueue() != null && mapping.getParentQueue().equals(PRIMARY_GROUP_MAPPING) && mapping.getQueue().equals(CURRENT_USER_MAPPING)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "primary group current user mapping", user); + } return getContextForGroupParent(user, mapping, getPrimaryGroup(user)); } else if (mapping.getParentQueue() != null && mapping.getParentQueue().equals(SECONDARY_GROUP_MAPPING) && mapping.getQueue().equals(CURRENT_USER_MAPPING)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "secondary group current user mapping", user); + } return getContextForGroupParent(user, mapping, getSecondaryGroup(user)); 
} else if (mapping.getQueue().equals(CURRENT_USER_MAPPING)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "current user mapping", user); + } return getPlacementContext(mapping, user); } else if (mapping.getQueue().equals(PRIMARY_GROUP_MAPPING)) { - return getContextForPrimaryGroup(user, mapping); + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "primary group mapping", user); + } + return getPlacementContext(mapping, getPrimaryGroup(user)); } else if (mapping.getQueue().equals(SECONDARY_GROUP_MAPPING)) { - return getContextForSecondaryGroup(user, mapping); + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "secondary group mapping", user); + } + return getPlacementContext(mapping, getSecondaryGroup(user)); } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "static user static mapping", user); + } return getPlacementContext(mapping); } } if (user.equals(mapping.getSource())) { if (mapping.getQueue().equals(PRIMARY_GROUP_MAPPING)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "static user primary group mapping", user); + } return getPlacementContext(mapping, getPrimaryGroup(user)); } else if (mapping.getQueue().equals(SECONDARY_GROUP_MAPPING)) { String secondaryGroup = getSecondaryGroup(user); if (secondaryGroup != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "static user secondary group mapping", user); + } return getPlacementContext(mapping, secondaryGroup); } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Wanted to create placement context for user {}" + + " using static user secondary group mapping," + + " but user has no secondary group!", user); + } return null; } } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "static user static mapping", user); + } return getPlacementContext(mapping); } } @@ -142,8 +183,16 @@ private ApplicationPlacementContext getPlacementForUser(String user) for (String userGroups : groups.getGroups(user)) { if (userGroups.equals(mapping.getSource())) { if (mapping.getQueue().equals(CURRENT_USER_MAPPING)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "static group current user mapping", user); + } return getPlacementContext(mapping, user); } + if (LOG.isDebugEnabled()) { + LOG.debug("Creating placement context for user {} using " + + "static group static mapping", user); + } return getPlacementContext(mapping); } } @@ -152,47 +201,23 @@ private ApplicationPlacementContext getPlacementForUser(String user) return null; } - // invoked for mappings: - // u:%user:[parent].%primary_group - // u:%user:%primary_group - private ApplicationPlacementContext getContextForPrimaryGroup( - String user, - QueueMapping mapping) throws IOException { - String group = - CapacitySchedulerConfiguration.ROOT + "." 
+ getPrimaryGroup(user); - - CSQueue parent = queueManager.getQueue(mapping.getParentQueue()); - - if (parent instanceof ManagedParentQueue) { - return getPlacementContext(mapping, group); - } else { - CSQueue queue = this.queueManager.getQueue(group); - if ( queue != null) { - return getPlacementContext(mapping, queue.getQueuePath()); - } else { - return null; - } - } - } - - // invoked for mappings - // u:%user:%secondary_group - // u:%user:[parent].%secondary_group - private ApplicationPlacementContext getContextForSecondaryGroup( - String user, - QueueMapping mapping) throws IOException { - String secondaryGroup = getSecondaryGroup(user); - - if (secondaryGroup != null) { - CSQueue queue = this.queueManager.getQueue(secondaryGroup); - if ( queue != null) { - return getPlacementContext(mapping, queue.getQueuePath()); - } else { - return null; - } - } else { - return null; - } + /** + * This convenience method allows to change the parent path or a leafName in + * a mapping object, by creating a new one, using the builder and copying the + * rest of the parameters. + * @param mapping The mapping to be changed + * @param parentPath The new parentPath of the mapping + * @param leafName The new leafQueueName of the mapping + * @return The updated NEW mapping + */ + private QueueMapping alterMapping( + QueueMapping mapping, String parentPath, String leafName) { + return QueueMappingBuilder.create() + .type(mapping.getType()) + .source(mapping.getSource()) + .queue(leafName) + .parentQueue(parentPath) + .build(); } // invoked for mappings: @@ -203,20 +228,24 @@ private ApplicationPlacementContext getContextForGroupParent( QueueMapping mapping, String group) throws IOException { - if (this.queueManager.getQueue(group) != null) { + CSQueue groupQueue = this.queueManager.getQueue(group); + if (groupQueue != null) { // replace the group string - QueueMapping resolvedGroupMapping = - QueueMappingBuilder.create() - .type(mapping.getType()) - .source(mapping.getSource()) - .queue(user) - .parentQueue( - CapacitySchedulerConfiguration.ROOT + "." 
+ - group) - .build(); + QueueMapping resolvedGroupMapping = alterMapping( + mapping, + groupQueue.getQueuePath(), + user); validateQueueMapping(resolvedGroupMapping); return getPlacementContext(resolvedGroupMapping, user); } else { + + if (queueManager.isAmbiguous(group)) { + LOG.info("Queue mapping rule expect group queue to exist with name {}" + + " but the reference is ambiguous!", group); + } else { + LOG.info("Queue mapping rule expect group queue to exist with name {}" + + " but it does not exist!", group); + } return null; } } @@ -245,7 +274,7 @@ public ApplicationPlacementContext getPlacementForApp( } catch (IOException ioex) { String message = "Failed to submit application " + applicationId + " submitted by user " + user + " reason: " + ioex.getMessage(); - throw new YarnException(message); + throw new YarnException(message, ioex); } } return null; @@ -258,7 +287,6 @@ private ApplicationPlacementContext getPlacementContext( private ApplicationPlacementContext getPlacementContext(QueueMapping mapping, String leafQueueName) throws IOException { - //leafQueue name no longer identifies a queue uniquely checking ambiguity if (!mapping.hasParentQueue() && queueManager.isAmbiguous(leafQueueName)) { throw new IOException("mapping contains ambiguous leaf queue reference " + @@ -266,13 +294,72 @@ private ApplicationPlacementContext getPlacementContext(QueueMapping mapping, } if (!StringUtils.isEmpty(mapping.getParentQueue())) { - return new ApplicationPlacementContext(leafQueueName, - mapping.getParentQueue()); - } else{ - return new ApplicationPlacementContext(leafQueueName); + return getPlacementContextWithParent(mapping, leafQueueName); + } else { + return getPlacementContextNoParent(leafQueueName); } } + private ApplicationPlacementContext getPlacementContextWithParent( + QueueMapping mapping, + String leafQueueName) { + CSQueue parent = queueManager.getQueue(mapping.getParentQueue()); + //we don't find the specified parent, so the placement rule is invalid + //for this case + if (parent == null) { + if (queueManager.isAmbiguous(mapping.getParentQueue())) { + LOG.warn("Placement rule specified a parent queue {}, but it is" + + "ambiguous.", mapping.getParentQueue()); + } else { + LOG.warn("Placement rule specified a parent queue {}, but it does" + + "not exist.", mapping.getParentQueue()); + } + return null; + } + + String parentPath = parent.getQueuePath(); + + //if we have a parent which is not a managed parent, we check if the leaf + //queue exists under this parent + if (!(parent instanceof ManagedParentQueue)) { + CSQueue queue = queueManager.getQueue( + parentPath + "." 
+ leafQueueName); + //if the queue doesn't exit we return null + if (queue == null) { + LOG.warn("Placement rule specified a parent queue {}, but it is" + + " not a managed parent queue, and no queue exists with name {} " + + "under it.", mapping.getParentQueue(), leafQueueName); + return null; + } + } + //at this point we either have a managed parent or the queue actually + //exists so we have a placement context, returning it + return new ApplicationPlacementContext(leafQueueName, parentPath); + } + + private ApplicationPlacementContext getPlacementContextNoParent( + String leafQueueName) { + //in this case we don't have a parent specified so we expect the queue to + //exist, otherwise the mapping will not be valid for this case + CSQueue queue = queueManager.getQueue(leafQueueName); + if (queue == null) { + if (queueManager.isAmbiguous(leafQueueName)) { + LOG.warn("Queue {} specified in placement rule is ambiguous", + leafQueueName); + } else { + LOG.warn("Queue {} specified in placement rule does not exist", + leafQueueName); + } + return null; + } + + //getting parent path to make sure if the leaf name would become ambiguous + //the placement context stays valid. + CSQueue parent = queueManager.getQueue(leafQueueName).getParent(); + return new ApplicationPlacementContext( + leafQueueName, parent.getQueuePath()); + } + @VisibleForTesting @Override public boolean initialize(ResourceScheduler scheduler) @@ -301,7 +388,6 @@ public boolean initialize(ResourceScheduler scheduler) //at this point mapping.getQueueName() return only the queue name, since //the config parsing have been changed making QueueMapping more consistent - QueuePath queuePath = mapping.getQueuePath(); if (isStaticQueueMapping(mapping)) { //Try getting queue by its full path name, if it exists it is a static //leaf queue indeed, without any auto creation magic @@ -322,7 +408,7 @@ public boolean initialize(ResourceScheduler scheduler) // then it should exist and // be an instance of AutoCreateEnabledParentQueue QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " + mapping @@ -335,7 +421,7 @@ public boolean initialize(ResourceScheduler scheduler) // if its an instance of auto created leaf queue, // then extract parent queue name and update queue mapping QueueMapping newMapping = validateAndGetQueueMapping(queueManager, - queue, mapping, queuePath); + queue, mapping); newMappings.add(newMapping); } } else{ @@ -346,7 +432,7 @@ public boolean initialize(ResourceScheduler scheduler) // parent queue exists and an instance of AutoCreateEnabledParentQueue // QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, queuePath); + queueManager, mapping); if (newMapping != null) { newMappings.add(newMapping); } else{ @@ -368,7 +454,7 @@ public boolean initialize(ResourceScheduler scheduler) private static QueueMapping validateAndGetQueueMapping( CapacitySchedulerQueueManager queueManager, CSQueue queue, - QueueMapping mapping, QueuePath queuePath) throws IOException { + QueueMapping mapping) throws IOException { if (!(queue instanceof LeafQueue)) { throw new IOException( "mapping contains invalid or non-leaf queue : " + @@ -379,7 +465,7 @@ private static QueueMapping validateAndGetQueueMapping( .getParent() instanceof ManagedParentQueue) { QueueMapping newMapping = validateAndGetAutoCreatedQueueMapping( - queueManager, mapping, 
queuePath); + queueManager, mapping); if (newMapping == null) { throw new IOException( "mapping contains invalid or non-leaf queue " @@ -395,30 +481,20 @@ private static boolean ifQueueDoesNotExist(CSQueue queue) { } private static QueueMapping validateAndGetAutoCreatedQueueMapping( - CapacitySchedulerQueueManager queueManager, QueueMapping mapping, - QueuePath queuePath) throws IOException { - if (queuePath.hasParentQueue() - && (queuePath.getParentQueue().equals(PRIMARY_GROUP_MAPPING) - || queuePath.getParentQueue().equals(SECONDARY_GROUP_MAPPING))) { + CapacitySchedulerQueueManager queueManager, QueueMapping mapping) + throws IOException { + if (mapping.hasParentQueue() + && (mapping.getParentQueue().equals(PRIMARY_GROUP_MAPPING) + || mapping.getParentQueue().equals(SECONDARY_GROUP_MAPPING))) { // dynamic parent queue - return QueueMappingBuilder.create() - .type(mapping.getType()) - .source(mapping.getSource()) - .queue(queuePath.getLeafQueue()) - .parentQueue(queuePath.getParentQueue()) - .build(); - } else if (queuePath.hasParentQueue()) { + return mapping; + } else if (mapping.hasParentQueue()) { //if parent queue is specified, // then it should exist and be an instance of ManagedParentQueue QueuePlacementRuleUtils.validateQueueMappingUnderParentQueue( - queueManager.getQueue(queuePath.getParentQueue()), - queuePath.getParentQueue(), queuePath.getLeafQueue()); - return QueueMappingBuilder.create() - .type(mapping.getType()) - .source(mapping.getSource()) - .queue(queuePath.getLeafQueue()) - .parentQueue(queuePath.getParentQueue()) - .build(); + queueManager.getQueue(mapping.getParentQueue()), + mapping.getParentQueue(), mapping.getQueue()); + return mapping; } return null; @@ -447,10 +523,12 @@ private void validateQueueMapping(QueueMapping queueMapping) //as mapping.getQueue() if (leafQueue == null && queueManager.isAmbiguous(leafQueueFullName)) { throw new IOException("mapping contains ambiguous leaf queue name: " - + leafQueueFullName); - } else { - throw new IOException("mapping contains invalid or non-leaf queue : " - + leafQueueFullName); + + leafQueueFullName); + } else if (parentQueue == null || + (!(parentQueue instanceof ManagedParentQueue))) { + throw new IOException("mapping contains invalid or non-leaf queue " + + " and no managed parent is found: " + + leafQueueFullName); } } else if (parentQueue == null || (!(parentQueue instanceof ParentQueue))) { throw new IOException( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/preprocessor/SubmissionContextPreProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/preprocessor/SubmissionContextPreProcessor.java index 68cc4cf1e2024..bf3fe5bc7f113 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/preprocessor/SubmissionContextPreProcessor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/preprocessor/SubmissionContextPreProcessor.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.preprocessor; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import java.io.BufferedReader; import 
java.io.File; import java.io.FileInputStream; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 4e61811c4a941..1f0a31ae47470 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -69,7 +69,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java index 2420735748bea..e29ea07a8671c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java @@ -31,9 +31,8 @@ import java.security.cert.X509Certificate; import java.util.HashMap; import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; +import org.apache.hadoop.yarn.server.resourcemanager.DBManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -42,13 +41,11 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; -import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto; @@ -56,7 +53,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ReservationAllocationStateProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.records.Version; -import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; @@ -66,14 +62,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.utils.LeveldbIterator; -import org.fusesource.leveldbjni.JniDBFactory; -import org.fusesource.leveldbjni.internal.NativeDB; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBException; import org.iq80.leveldb.Options; import org.iq80.leveldb.WriteBatch; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Changes from 1.0 to 1.1, Addition of ReservationSystem state. @@ -100,7 +94,7 @@ public class LeveldbRMStateStore extends RMStateStore { .newInstance(1, 1); private DB db; - private Timer compactionTimer; + private DBManager dbManager = new DBManager(); private long compactionIntervalMsec; private String getApplicationNodeKey(ApplicationId appId) { @@ -140,7 +134,7 @@ private String getProxyCAPrivateKeyNodeKey() { } @Override - protected void initInternal(Configuration conf) throws Exception { + protected void initInternal(Configuration conf) { compactionIntervalMsec = conf.getLong( YarnConfiguration.RM_LEVELDB_COMPACTION_INTERVAL_SECS, YarnConfiguration.DEFAULT_RM_LEVELDB_COMPACTION_INTERVAL_SECS) * 1000; @@ -165,55 +159,20 @@ private Path createStorageDir() throws IOException { @Override protected void startInternal() throws Exception { - db = openDatabase(); - startCompactionTimer(); - } - - protected DB openDatabase() throws Exception { Path storeRoot = createStorageDir(); Options options = new Options(); options.createIfMissing(false); LOG.info("Using state database at " + storeRoot + " for recovery"); File dbfile = new File(storeRoot.toString()); - try { - db = JniDBFactory.factory.open(dbfile, options); - } catch (NativeDB.DBException e) { - if (e.isNotFound() || e.getMessage().contains(" does not exist ")) { - LOG.info("Creating state database at " + dbfile); - options.createIfMissing(true); - try { - db = JniDBFactory.factory.open(dbfile, options); - // store version - storeVersion(); - } catch (DBException dbErr) { - throw new IOException(dbErr.getMessage(), dbErr); - } - } else { - throw e; - } - } - return db; - } - - private void startCompactionTimer() { - if (compactionIntervalMsec > 0) { - compactionTimer = new Timer( - this.getClass().getSimpleName() + " compaction timer", true); - compactionTimer.schedule(new CompactionTimerTask(), - compactionIntervalMsec, compactionIntervalMsec); - } + db = dbManager.initDatabase(dbfile, options, (database) -> + storeVersion(CURRENT_VERSION_INFO)); + dbManager.startCompactionTimer(compactionIntervalMsec, + this.getClass().getSimpleName()); } @Override protected void closeInternal() throws Exception { - if (compactionTimer != null) { - compactionTimer.cancel(); - compactionTimer = null; - } - if (db != null) { - db.close(); - db = null; - } + dbManager.close(); } @VisibleForTesting @@ -228,33 +187,22 @@ DB getDatabase() { @Override protected Version loadVersion() throws Exception { - Version version = null; - try { - byte[] data = db.get(bytes(VERSION_NODE)); - if (data != null) { - version 
= new VersionPBImpl(VersionProto.parseFrom(data)); - } - } catch (DBException e) { - throw new IOException(e); - } - return version; + return dbManager.loadVersion(VERSION_NODE); } @Override protected void storeVersion() throws Exception { - dbStoreVersion(CURRENT_VERSION_INFO); - } - - void dbStoreVersion(Version state) throws IOException { - String key = VERSION_NODE; - byte[] data = ((VersionPBImpl) state).getProto().toByteArray(); try { - db.put(bytes(key), data); + storeVersion(CURRENT_VERSION_INFO); } catch (DBException e) { throw new IOException(e); } } + protected void storeVersion(Version version) { + dbManager.storeVersion(VERSION_NODE, version); + } + @Override protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; @@ -290,9 +238,7 @@ public RMState loadState() throws Exception { private void loadReservationState(RMState rmState) throws IOException { int numReservations = 0; - LeveldbIterator iter = null; - try { - iter = new LeveldbIterator(db); + try (LeveldbIterator iter = new LeveldbIterator(db)) { iter.seek(bytes(RM_RESERVATION_KEY_PREFIX)); while (iter.hasNext()) { Entry entry = iter.next(); @@ -324,10 +270,6 @@ private void loadReservationState(RMState rmState) throws IOException { } } catch (DBException e) { throw new IOException(e); - } finally { - if (iter != null) { - iter.close(); - } } LOG.info("Recovered " + numReservations + " reservations"); } @@ -342,9 +284,7 @@ private void loadRMDTSecretManagerState(RMState state) throws IOException { private int loadRMDTSecretManagerKeys(RMState state) throws IOException { int numKeys = 0; - LeveldbIterator iter = null; - try { - iter = new LeveldbIterator(db); + try (LeveldbIterator iter = new LeveldbIterator(db)) { iter.seek(bytes(RM_DT_MASTER_KEY_KEY_PREFIX)); while (iter.hasNext()) { Entry entry = iter.next(); @@ -361,10 +301,6 @@ private int loadRMDTSecretManagerKeys(RMState state) throws IOException { } } catch (DBException e) { throw new IOException(e); - } finally { - if (iter != null) { - iter.close(); - } } return numKeys; } @@ -382,9 +318,7 @@ private DelegationKey loadDelegationKey(byte[] data) throws IOException { private int loadRMDTSecretManagerTokens(RMState state) throws IOException { int numTokens = 0; - LeveldbIterator iter = null; - try { - iter = new LeveldbIterator(db); + try (LeveldbIterator iter = new LeveldbIterator(db)) { iter.seek(bytes(RM_DT_TOKEN_KEY_PREFIX)); while (iter.hasNext()) { Entry entry = iter.next(); @@ -404,17 +338,13 @@ private int loadRMDTSecretManagerTokens(RMState state) throws IOException { } } catch (DBException e) { throw new IOException(e); - } finally { - if (iter != null) { - iter.close(); - } } return numTokens; } private RMDelegationTokenIdentifierData loadDelegationToken(byte[] data) throws IOException { - RMDelegationTokenIdentifierData tokenData = null; + RMDelegationTokenIdentifierData tokenData; DataInputStream in = new DataInputStream(new ByteArrayInputStream(data)); try { tokenData = RMStateStoreUtils.readRMDelegationTokenIdentifierData(in); @@ -426,7 +356,7 @@ private RMDelegationTokenIdentifierData loadDelegationToken(byte[] data) private void loadRMDTSecretManagerTokenSequenceNumber(RMState state) throws IOException { - byte[] data = null; + byte[] data; try { data = db.get(bytes(RM_DT_SEQUENCE_NUMBER_KEY)); } catch (DBException e) { @@ -445,9 +375,7 @@ private void loadRMDTSecretManagerTokenSequenceNumber(RMState state) private void loadRMApps(RMState state) throws IOException { int numApps = 0; int numAppAttempts = 0; - LeveldbIterator iter = null; 
- try { - iter = new LeveldbIterator(db); + try (LeveldbIterator iter = new LeveldbIterator(db)) { iter.seek(bytes(RM_APP_KEY_PREFIX)); while (iter.hasNext()) { Entry entry = iter.next(); @@ -467,10 +395,6 @@ private void loadRMApps(RMState state) throws IOException { } } catch (DBException e) { throw new IOException(e); - } finally { - if (iter != null) { - iter.close(); - } } LOG.info("Recovered " + numApps + " applications and " + numAppAttempts + " application attempts"); @@ -523,7 +447,7 @@ private ApplicationStateData createApplicationState(String appIdStr, @VisibleForTesting ApplicationStateData loadRMAppState(ApplicationId appId) throws IOException { String appKey = getApplicationNodeKey(appId); - byte[] data = null; + byte[] data; try { data = db.get(bytes(appKey)); } catch (DBException e) { @@ -539,7 +463,7 @@ ApplicationStateData loadRMAppState(ApplicationId appId) throws IOException { ApplicationAttemptStateData loadRMAppAttemptState( ApplicationAttemptId attemptId) throws IOException { String attemptKey = getApplicationAttemptNodeKey(attemptId); - byte[] data = null; + byte[] data; try { data = db.get(bytes(attemptKey)); } catch (DBException e) { @@ -668,8 +592,7 @@ protected void removeApplicationStateInternal(ApplicationStateData appState) appState.getApplicationSubmissionContext().getApplicationId(); String appKey = getApplicationNodeKey(appId); try { - WriteBatch batch = db.createWriteBatch(); - try { + try (WriteBatch batch = db.createWriteBatch()) { batch.delete(bytes(appKey)); for (ApplicationAttemptId attemptId : appState.attempts.keySet()) { String attemptKey = getApplicationAttemptNodeKey(appKey, attemptId); @@ -680,8 +603,6 @@ protected void removeApplicationStateInternal(ApplicationStateData appState) + appState.attempts.size() + " attempts" + " at " + appKey); } db.write(batch); - } finally { - batch.close(); } } catch (DBException e) { throw new IOException(e); @@ -693,16 +614,13 @@ protected void storeReservationState( ReservationAllocationStateProto reservationAllocation, String planName, String reservationIdName) throws Exception { try { - WriteBatch batch = db.createWriteBatch(); - try { + try (WriteBatch batch = db.createWriteBatch()) { String key = getReservationNodeKey(planName, reservationIdName); LOG.debug("Storing state for reservation {} plan {} at {}", reservationIdName, planName, key); batch.put(bytes(key), reservationAllocation.toByteArray()); db.write(batch); - } finally { - batch.close(); } } catch (DBException e) { throw new IOException(e); @@ -713,16 +631,13 @@ protected void storeReservationState( protected void removeReservationState(String planName, String reservationIdName) throws Exception { try { - WriteBatch batch = db.createWriteBatch(); - try { + try (WriteBatch batch = db.createWriteBatch()) { String reservationKey = getReservationNodeKey(planName, reservationIdName); batch.delete(bytes(reservationKey)); LOG.debug("Removing state for reservation {} plan {} at {}", reservationIdName, planName, reservationKey); db.write(batch); - } finally { - batch.close(); } } catch (DBException e) { throw new IOException(e); @@ -736,10 +651,9 @@ private void storeOrUpdateRMDT(RMDelegationTokenIdentifier tokenId, new RMDelegationTokenIdentifierData(tokenId, renewDate); LOG.debug("Storing token to {}", tokenKey); try { - WriteBatch batch = db.createWriteBatch(); - try { + try (WriteBatch batch = db.createWriteBatch()) { batch.put(bytes(tokenKey), tokenData.toByteArray()); - if(!isUpdate) { + if (!isUpdate) { ByteArrayOutputStream bs = new 
ByteArrayOutputStream(); try (DataOutputStream ds = new DataOutputStream(bs)) { ds.writeInt(tokenId.getSequenceNumber()); @@ -749,8 +663,6 @@ private void storeOrUpdateRMDT(RMDelegationTokenIdentifier tokenId, batch.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), bs.toByteArray()); } db.write(batch); - } finally { - batch.close(); } } catch (DBException e) { throw new IOException(e); @@ -789,11 +701,8 @@ protected void storeRMDTMasterKeyState(DelegationKey masterKey) String dbKey = getRMDTMasterKeyNodeKey(masterKey); LOG.debug("Storing token master key to {}", dbKey); ByteArrayOutputStream os = new ByteArrayOutputStream(); - DataOutputStream out = new DataOutputStream(os); - try { + try (DataOutputStream out = new DataOutputStream(os)) { masterKey.write(out); - } finally { - out.close(); } try { db.put(bytes(dbKey), os.toByteArray()); @@ -833,13 +742,10 @@ protected void storeProxyCACertState( String caPrivateKeyKey = getProxyCAPrivateKeyNodeKey(); try { - WriteBatch batch = db.createWriteBatch(); - try { + try (WriteBatch batch = db.createWriteBatch()) { batch.put(bytes(caCertKey), caCertData); batch.put(bytes(caPrivateKeyKey), caPrivateKeyData); db.write(batch); - } finally { - batch.close(); } } catch (DBException e) { throw new IOException(e); @@ -871,9 +777,7 @@ public synchronized void removeApplication(ApplicationId removeAppId) @VisibleForTesting int getNumEntriesInDatabase() throws IOException { int numEntries = 0; - LeveldbIterator iter = null; - try { - iter = new LeveldbIterator(db); + try (LeveldbIterator iter = new LeveldbIterator(db)) { iter.seekToFirst(); while (iter.hasNext()) { Entry entry = iter.next(); @@ -882,26 +786,12 @@ int getNumEntriesInDatabase() throws IOException { } } catch (DBException e) { throw new IOException(e); - } finally { - if (iter != null) { - iter.close(); - } } return numEntries; } - private class CompactionTimerTask extends TimerTask { - @Override - public void run() { - long start = Time.monotonicNow(); - LOG.info("Starting full compaction cycle"); - try { - db.compactRange(null, null); - } catch (DBException e) { - LOG.error("Error compacting database", e); - } - long duration = Time.monotonicNow() - start; - LOG.info("Full compaction cycle completed in " + duration + " msec"); - } + @VisibleForTesting + protected void setDbManager(DBManager dbManager) { + this.dbManager = dbManager; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 8c82af89842c7..82a2f49696e26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -40,7 +40,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Private @Unstable diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 052f6b0499efb..87f3f0ded4581 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -39,8 +39,8 @@ import javax.crypto.SecretKey; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateUpdateAppEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateUpdateAppEvent.java index a43d43ef0315a..1c156df0fcb14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateUpdateAppEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateUpdateAppEvent.java @@ -20,7 +20,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; public class RMStateUpdateAppEvent extends RMStateStoreEvent { private final ApplicationStateData appState; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index fd0a2141e6889..55c9d9eeaee22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -18,7 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.SystemClock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.curator.framework.CuratorFramework; @@ -234,6 +236,10 @@ public class ZKRMStateStore 
extends RMStateStore { /** Manager for the ZooKeeper connection. */ private ZKCuratorManager zkManager; + private volatile Clock clock = SystemClock.getInstance(); + @VisibleForTesting + protected ZKRMStateStoreOpDurations opDurations; + /* * Indicates different app attempt state store operations. */ @@ -329,6 +335,8 @@ public synchronized void initInternal(Configuration conf) appIdNodeSplitIndex = YarnConfiguration.DEFAULT_ZK_APPID_NODE_SPLIT_INDEX; } + opDurations = ZKRMStateStoreOpDurations.getInstance(); + zkAcl = ZKCuratorManager.getZKAcls(conf); if (HAUtil.isHAEnabled(conf)) { @@ -518,6 +526,7 @@ public synchronized long getAndIncrementEpoch() throws Exception { @Override public synchronized RMState loadState() throws Exception { + long start = clock.getTime(); RMState rmState = new RMState(); // recover DelegationTokenSecretManager loadRMDTSecretManagerState(rmState); @@ -529,6 +538,7 @@ public synchronized RMState loadState() throws Exception { loadReservationSystemState(rmState); // recover ProxyCAManager state loadProxyCAManagerState(rmState); + opDurations.addLoadStateCallDuration(clock.getTime() - start); return rmState; } @@ -834,6 +844,7 @@ private void loadProxyCAManagerState(RMState rmState) throws Exception { @Override public synchronized void storeApplicationStateInternal(ApplicationId appId, ApplicationStateData appStateDataPB) throws Exception { + long start = clock.getTime(); String nodeCreatePath = getLeafAppIdNodePath(appId.toString(), true); LOG.debug("Storing info for app: {} at: {}", appId, nodeCreatePath); @@ -850,12 +861,14 @@ public synchronized void storeApplicationStateInternal(ApplicationId appId, + " exceeds the maximum allowed size for application data. " + "See yarn.resourcemanager.zk-max-znode-size.bytes."); } + opDurations.addStoreApplicationStateCallDuration(clock.getTime() - start); } @Override protected synchronized void updateApplicationStateInternal( ApplicationId appId, ApplicationStateData appStateDataPB) throws Exception { + long start = clock.getTime(); String nodeUpdatePath = getLeafAppIdNodePath(appId.toString(), false); boolean pathExists = true; // Look for paths based on other split indices if path as per split index @@ -892,6 +905,7 @@ protected synchronized void updateApplicationStateInternal( LOG.debug("Path {} for {} didn't exist. Creating a new znode to update" + " the application state.", nodeUpdatePath, appId); } + opDurations.addUpdateApplicationStateCallDuration(clock.getTime() - start); } /* @@ -976,8 +990,10 @@ protected synchronized void removeApplicationAttemptInternal( @Override protected synchronized void removeApplicationStateInternal( ApplicationStateData appState) throws Exception { + long start = clock.getTime(); removeApp(appState.getApplicationSubmissionContext(). 
getApplicationId().toString(), true, appState.attempts.keySet()); + opDurations.addRemoveApplicationStateCallDuration(clock.getTime() - start); } private void removeApp(String removeAppId) throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStoreOpDurations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStoreOpDurations.java new file mode 100644 index 0000000000000..f1ec2419f7a6e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStoreOpDurations.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.recovery; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; + +import static org.apache.hadoop.metrics2.lib.Interns.info; +import org.apache.hadoop.metrics2.lib.MutableRate; + +/** + * Class to capture the performance metrics of ZKRMStateStore. + * This should be a singleton. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +@Metrics(context="ZKRMStateStore-op-durations") +public final class ZKRMStateStoreOpDurations implements MetricsSource { + + @Metric("Duration for a load state call") + MutableRate loadStateCall; + + @Metric("Duration for a store application state call") + MutableRate storeApplicationStateCall; + + @Metric("Duration for a update application state call") + MutableRate updateApplicationStateCall; + + @Metric("Duration to handle a remove application state call") + MutableRate removeApplicationStateCall; + + protected static final MetricsInfo RECORD_INFO = + info("ZKRMStateStoreOpDurations", "Durations of ZKRMStateStore calls"); + + private final MetricsRegistry registry; + + private static final ZKRMStateStoreOpDurations INSTANCE + = new ZKRMStateStoreOpDurations(); + + public static ZKRMStateStoreOpDurations getInstance() { + return INSTANCE; + } + + private ZKRMStateStoreOpDurations() { + registry = new MetricsRegistry(RECORD_INFO); + registry.tag(RECORD_INFO, "ZKRMStateStoreOpDurations"); + + MetricsSystem ms = DefaultMetricsSystem.instance(); + if (ms != null) { + ms.register(RECORD_INFO.name(), RECORD_INFO.description(), this); + } + } + + @Override + public synchronized void getMetrics(MetricsCollector collector, boolean all) { + registry.snapshot(collector.addRecord(registry.info()), all); + } + + public void addLoadStateCallDuration(long value) { + loadStateCall.add(value); + } + + public void addStoreApplicationStateCallDuration(long value) { + storeApplicationStateCall.add(value); + } + + public void addUpdateApplicationStateCallDuration(long value) { + updateApplicationStateCall.add(value); + } + + public void addRemoveApplicationStateCallDuration(long value) { + removeApplicationStateCall.add(value); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java index d3269447a909f..73b8a9b27d377 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java @@ -31,7 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This data structure stores a periodic {@link RLESparseResourceAllocation}. 
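(Illustrative sketch, not part of the patch: the new ZKRMStateStoreOpDurations singleton above is driven from ZKRMStateStore by sampling a Clock before and after each state-store operation and feeding the delta into the matching MutableRate, as the loadState() and storeApplicationStateInternal() hunks show. The condensed Java below mirrors only that measurement pattern; the OpDurationSketch class and its timedLoadState wrapper are invented for the example and do not exist in the codebase.)

import org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStoreOpDurations;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;

public class OpDurationSketch {
  // Same wiring as ZKRMStateStore: a system clock plus the shared singleton
  // metrics source, which registers itself with the default metrics system.
  private final Clock clock = SystemClock.getInstance();
  private final ZKRMStateStoreOpDurations opDurations =
      ZKRMStateStoreOpDurations.getInstance();

  // Hypothetical wrapper showing the pattern used in loadState():
  // timestamp before, do the work, record the elapsed time.
  public void timedLoadState(Runnable doLoad) {
    long start = clock.getTime();
    doLoad.run();                                   // the actual ZK reads
    opDurations.addLoadStateCallDuration(clock.getTime() - start);
  }
}

The recorded rates then surface through the usual Hadoop metrics2 sinks (for example JMX) under the "ZKRMStateStore-op-durations" context declared by the @Metrics annotation on the class above.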
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java index bb4a7fb52acbe..f0a42e2c10b2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java @@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A ReservationAllocation represents a concrete allocation of resources over diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java index 1c248e11a0535..32b912d51263d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java @@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This (re)planner scan a period of time from now to a maximum time window (or diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java index 49f7d0876b610..983504eefe668 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.resource; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java index 535888cfcb422..d8323f5497b9a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java @@ -243,10 +243,6 @@ ApplicationReport createAndGetApplicationReport(String clientUserName, */ int getMaxAppAttempts(); - boolean isLogAggregationEnabled(); - - boolean isLogAggregationFinished(); - /** * Returns the application type * @return the application type. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 8223fe2778e8e..23f49b0464994 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -110,7 +110,7 @@ import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @SuppressWarnings({ "rawtypes", "unchecked" }) public class RMAppImpl implements RMApp, Recoverable { @@ -453,11 +453,20 @@ public RMAppImpl(ApplicationId applicationId, RMContext rmContext, this.applicationPriority = Priority.newInstance(0); } - int globalMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + int globalMaxAppAttempts = conf.getInt( + YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS, + conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS)); + int rmMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); int individualMaxAppAttempts = submissionContext.getMaxAppAttempts(); - if (individualMaxAppAttempts <= 0 || - individualMaxAppAttempts > globalMaxAppAttempts) { + if (individualMaxAppAttempts <= 0) { + this.maxAppAttempts = rmMaxAppAttempts; + LOG.warn("The specific max attempts: " + individualMaxAppAttempts + + " for application: " + applicationId.getId() + + " is invalid, because it is less than or equal to zero." + + " Use the rm max attempts instead."); + } else if (individualMaxAppAttempts > globalMaxAppAttempts) { this.maxAppAttempts = globalMaxAppAttempts; LOG.warn("The specific max attempts: " + individualMaxAppAttempts + " for application: " + applicationId.getId() @@ -1211,8 +1220,9 @@ private String getAppAttemptFailedDiagnostics(RMAppEvent event) { + " failed due to " + failedEvent.getDiagnosticMsg() + ". 
Failing the application."; } else if (this.isNumAttemptsBeyondThreshold) { - int globalLimit = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, - YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); + int globalLimit = conf.getInt(YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS, + conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS)); msg = String.format( "Application %s failed %d times%s%s due to %s. Failing the application.", getApplicationId(), @@ -1755,16 +1765,6 @@ public void aggregateLogReport(NodeId nodeId, LogAggregationReport report) { logAggregation.aggregateLogReport(nodeId, report, this); } - @Override - public boolean isLogAggregationFinished() { - return logAggregation.isFinished(); - } - - @Override - public boolean isLogAggregationEnabled() { - return logAggregation.isEnabled(); - } - public String getLogAggregationFailureMessagesForNM(NodeId nodeId) { return logAggregation.getLogAggregationFailureMessagesForNM(nodeId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 6087e58cf23c8..2d6de3750e982 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -116,7 +116,7 @@ import org.apache.hadoop.yarn.util.BoundedAppender; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @SuppressWarnings({"unchecked", "rawtypes"}) public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { @@ -179,6 +179,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { private long launchAMEndTime = 0; private long scheduledTime = 0; private long containerAllocatedTime = 0; + private boolean nonWorkPreservingAMContainerFinished = false; // Set to null initially. Will eventually get set // if an RMAppAttemptUnregistrationEvent occurs @@ -853,7 +854,7 @@ public List pullJustFinishedContainers() { // A new allocate means the AM received the previously sent // finishedContainers. We can ack this to NM now - sendFinishedContainersToNM(); + sendFinishedContainersToNM(finishedContainersSentToAM); // Mark every containerStatus as being sent to AM though we may return // only the ones that belong to the current attempt @@ -1980,12 +1981,13 @@ private void sendFinishedAMContainerToNM(NodeId nodeId, } // Ack NM to remove finished containers from context. 
- private void sendFinishedContainersToNM() { - for (NodeId nodeId : finishedContainersSentToAM.keySet()) { + private void sendFinishedContainersToNM( + Map> finishedContainers) { + for (NodeId nodeId : finishedContainers.keySet()) { // Clear and get current values List currentSentContainers = - finishedContainersSentToAM.put(nodeId, new ArrayList<>()); + finishedContainers.put(nodeId, new ArrayList<>()); List containerIdList = new ArrayList<>(currentSentContainers.size()); for (ContainerStatus containerStatus : currentSentContainers) { @@ -1994,7 +1996,7 @@ private void sendFinishedContainersToNM() { eventHandler.handle(new RMNodeFinishedContainersPulledByAMEvent(nodeId, containerIdList)); } - this.finishedContainersSentToAM.clear(); + finishedContainers.clear(); } // Add am container to the list so that am container instance will be @@ -2020,7 +2022,16 @@ private static void amContainerFinished(RMAppAttemptImpl appAttempt, appAttempt.finishedContainersSentToAM.putIfAbsent(nodeId, new ArrayList<>()); appAttempt.finishedContainersSentToAM.get(nodeId).add(containerStatus); - appAttempt.sendFinishedContainersToNM(); + appAttempt.sendFinishedContainersToNM( + appAttempt.finishedContainersSentToAM); + // there might be some completed containers that have not been pulled + // by the AM heartbeat, explicitly add them for cleanup. + appAttempt.sendFinishedContainersToNM(appAttempt.justFinishedContainers); + + // mark the fact that AM container has finished so that future finished + // containers will be cleaned up without the engagement of AM containers + // (through heartbeat) + appAttempt.nonWorkPreservingAMContainerFinished = true; } else { appAttempt.sendFinishedAMContainerToNM(nodeId, containerStatus.getContainerId()); @@ -2048,6 +2059,11 @@ private static void addJustFinishedContainer(RMAppAttemptImpl appAttempt, .getNodeId(), new ArrayList<>()); appAttempt.justFinishedContainers.get(containerFinishedEvent .getNodeId()).add(containerFinishedEvent.getContainerStatus()); + + if (appAttempt.nonWorkPreservingAMContainerFinished) { + // AM container has finished, so no more AM heartbeats to do the cleanup. 
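      // Once the AM container has exited in a non-work-preserving way there
      // are no further AM allocate heartbeats to pull these statuses, so each
      // newly recorded finished container is acked to its NM immediately
      // instead of waiting to be pulled by the AM.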
+ appAttempt.sendFinishedContainersToNM(appAttempt.justFinishedContainers); + } } private static final class ContainerFinishedAtFinalStateTransition diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index 004c170262f4f..184bc58a2440d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -27,7 +27,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index d3b515e82412f..e6205d2dac6a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -212,4 +212,8 @@ public interface RMNode { * @return all node attributes as a Set. 
*/ Set getAllNodeAttributes(); + + long calculateHeartBeatInterval(long defaultInterval, + long minInterval, long maxInterval, float speedupFactor, + float slowdownFactor); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index a565fe75656a0..6070d02217ce1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -36,6 +36,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import org.apache.commons.collections.keyvalue.DefaultMapEntry; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -92,7 +93,7 @@ import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class is used to keep track of all the applications/containers @@ -140,6 +141,9 @@ public class RMNodeImpl implements RMNode, EventHandler { /* Resource utilization for the node. */ private ResourceUtilization nodeUtilization; + /* Track last increment made to Utilization metrics*/ + private Resource lastUtilIncr = Resources.none(); + /** Physical resources in the node. */ private volatile Resource physicalResource; @@ -208,7 +212,8 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEventType, RMNodeEvent>(NodeState.NEW) //Transitions from NEW state - .addTransition(NodeState.NEW, NodeState.RUNNING, + .addTransition(NodeState.NEW, + EnumSet.of(NodeState.RUNNING, NodeState.UNHEALTHY), RMNodeEventType.STARTED, new AddNodeTransition()) .addTransition(NodeState.NEW, NodeState.NEW, RMNodeEventType.RESOURCE_UPDATE, @@ -315,9 +320,6 @@ RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition()) .addTransition(NodeState.DECOMMISSIONING, EnumSet.of( NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONED), RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) - .addTransition(NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONING, - RMNodeEventType.RESOURCE_UPDATE, - new UpdateNodeResourceWhenRunningTransition()) //Transitions from LOST state .addTransition(NodeState.LOST, NodeState.LOST, @@ -400,7 +402,8 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, this.lastHealthReportTime = System.currentTimeMillis(); this.nodeManagerVersion = nodeManagerVersion; this.timeStamp = 0; - this.physicalResource = physResource; + // If physicalResource is not available, capability is a reasonable guess + this.physicalResource = physResource==null ? 
capability : physResource; this.latestNodeHeartBeatResponse.setResponseId(0); @@ -549,6 +552,37 @@ public void setAggregatedContainersUtilization( } } + private void clearContributionToUtilizationMetrics() { + ClusterMetrics metrics = ClusterMetrics.getMetrics(); + metrics.decrUtilizedMB(lastUtilIncr.getMemorySize()); + metrics.decrUtilizedVirtualCores(lastUtilIncr.getVirtualCores()); + lastUtilIncr = Resources.none(); + } + + private void updateClusterUtilizationMetrics() { + // Update cluster utilization metrics + ClusterMetrics metrics = ClusterMetrics.getMetrics(); + Resource prevIncr = lastUtilIncr; + + if (this.nodeUtilization == null) { + lastUtilIncr = Resources.none(); + } else { + /* Scale memory contribution based on configured node size */ + long newmem = (long)((float)this.nodeUtilization.getPhysicalMemory() + / Math.max(1.0f, this.getPhysicalResource().getMemorySize()) + * this.getTotalCapability().getMemorySize()); + lastUtilIncr = + Resource.newInstance(newmem, + (int) (this.nodeUtilization.getCPU() + / Math.max(1.0f, this.getPhysicalResource().getVirtualCores()) + * this.getTotalCapability().getVirtualCores())); + } + metrics.incrUtilizedMB(lastUtilIncr.getMemorySize() - + prevIncr.getMemorySize()); + metrics.incrUtilizedVirtualCores(lastUtilIncr.getVirtualCores() - + prevIncr.getVirtualCores()); + } + @Override public ResourceUtilization getNodeUtilization() { this.readLock.lock(); @@ -682,6 +716,48 @@ public void resetLastNodeHeartBeatResponse() { } } + @Override + public long calculateHeartBeatInterval(long defaultInterval, long minInterval, + long maxInterval, float speedupFactor, float slowdownFactor) { + + long newInterval = defaultInterval; + + ClusterMetrics metrics = ClusterMetrics.getMetrics(); + float clusterUtil = metrics.getUtilizedVirtualCores() + / Math.max(1.0f, metrics.getCapabilityVirtualCores()); + + if (this.nodeUtilization != null && this.getPhysicalResource() != null) { + // getCPU() returns utilization normalized to 1 cpu. getVirtualCores() on + // a physicalResource returns number of physical cores. So, + // nodeUtil will be CPU utilization of entire node. 
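      // Illustrative, standalone sketch (not part of the patch) of the clamped
      // scaling applied below: the farther a node's CPU utilization is from
      // the cluster average, the more its heartbeat interval is stretched or
      // shrunk, bounded by minInterval/maxInterval. All numbers are assumed
      // sample values.
      final class HeartbeatIntervalSketch {
        static long scaledInterval(long defaultInterval, long minInterval,
            long maxInterval, float nodeUtil, float clusterUtil,
            float speedupFactor, float slowdownFactor) {
          // Sanitize utilization into [0, 1], mirroring the code below.
          nodeUtil = Math.min(1.0f, Math.max(0.0f, nodeUtil));
          clusterUtil = Math.min(1.0f, Math.max(0.0f, clusterUtil));
          long interval = nodeUtil > clusterUtil
              // Busier than the cluster average: heartbeat less often.
              ? (long) (defaultInterval
                  * (1.0f + (nodeUtil - clusterUtil) * slowdownFactor))
              // Less busy than the cluster average: heartbeat more often.
              : (long) (defaultInterval
                  * (1.0f - (clusterUtil - nodeUtil) * speedupFactor));
          return Math.min(maxInterval, Math.max(minInterval, interval));
        }

        public static void main(String[] args) {
          // Node at 80% CPU vs. a 60% cluster average, slowdown factor 1.5:
          // 1000ms * (1 + 0.2 * 1.5) = 1300ms.
          System.out.println(
              scaledInterval(1000, 500, 3000, 0.8f, 0.6f, 1.0f, 1.5f));
          // Node at 30% vs. 60%, speedup factor 1.0: 1000ms * (1 - 0.3) = 700ms.
          System.out.println(
              scaledInterval(1000, 500, 3000, 0.3f, 0.6f, 1.0f, 1.5f));
        }
      }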
+ float nodeUtil = this.nodeUtilization.getCPU() + / Math.max(1.0f, this.getPhysicalResource().getVirtualCores()); + + // sanitize + nodeUtil = Math.min(1.0f, Math.max(0.0f, nodeUtil)); + clusterUtil = Math.min(1.0f, Math.max(0.0f, clusterUtil)); + + if (nodeUtil > clusterUtil) { + // Slow down - 20% more CPU utilization means slow down by 20% * factor + newInterval = (long) (defaultInterval + * (1.0f + (nodeUtil - clusterUtil) * slowdownFactor)); + } else { + // Speed up - 20% less CPU utilization means speed up by 20% * factor + newInterval = (long) (defaultInterval + * (1.0f - (clusterUtil - nodeUtil) * speedupFactor)); + } + newInterval = + Math.min(maxInterval, Math.max(minInterval, newInterval)); + + if (LOG.isDebugEnabled()) { + LOG.debug("Setting heartbeatinterval to: " + newInterval + + " node:" + this.nodeId + " nodeUtil: " + nodeUtil + + " clusterUtil: " + clusterUtil); + } + } + return newInterval; + } + public void handle(RMNodeEvent event) { LOG.debug("Processing {} of type {}", event.getNodeId(), event.getType()); writeLock.lock(); @@ -707,7 +783,8 @@ public void handle(RMNodeEvent event) { private void updateMetricsForRejoinedNode(NodeState previousNodeState) { ClusterMetrics metrics = ClusterMetrics.getMetrics(); - metrics.incrNumActiveNodes(); + // Update utilization metrics + this.updateClusterUtilizationMetrics(); switch (previousNodeState) { case LOST: @@ -766,6 +843,8 @@ private void updateMetricsForGracefulDecommission(NodeState initialState, private void updateMetricsForDeactivatedNode(NodeState initialState, NodeState finalState) { ClusterMetrics metrics = ClusterMetrics.getMetrics(); + // Update utilization metrics + clearContributionToUtilizationMetrics(); switch (initialState) { case RUNNING: @@ -850,10 +929,10 @@ private static NodeHealthStatus updateRMNodeFromStatusEvents( } public static class AddNodeTransition implements - SingleArcTransition { + MultipleArcTransition { @Override - public void transition(RMNodeImpl rmNode, RMNodeEvent event) { + public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // Inform the scheduler RMNodeStartedEvent startEvent = (RMNodeStartedEvent) event; List containers = null; @@ -871,8 +950,6 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) { if (previousRMNode != null) { ClusterMetrics.getMetrics().decrDecommisionedNMs(); } - // Increment activeNodes explicitly because this is a new node. 
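          // With the NEW -> {RUNNING, UNHEALTHY} start transition, the active
          // node counter is incremented only when the node is actually reported
          // RUNNING (see reportNodeRunning below) or when an UNHEALTHY node
          // transitions back to RUNNING, so a node that registers in an
          // unhealthy state is not counted as active.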
- ClusterMetrics.getMetrics().incrNumActiveNodes(); containers = startEvent.getNMContainerStatuses(); if (containers != null && !containers.isEmpty()) { for (NMContainerStatus container : containers) { @@ -889,17 +966,37 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) { } } - rmNode.context.getDispatcher().getEventHandler() - .handle(new NodeAddedSchedulerEvent(rmNode, containers)); - rmNode.context.getDispatcher().getEventHandler().handle( - new NodesListManagerEvent( - NodesListManagerEventType.NODE_USABLE, rmNode)); + NodeState nodeState; + NodeStatus nodeStatus = + startEvent.getNodeStatus(); + + if (nodeStatus == null) { + nodeState = NodeState.RUNNING; + reportNodeRunning(rmNode, containers); + } else { + RMNodeStatusEvent rmNodeStatusEvent = + new RMNodeStatusEvent(nodeId, nodeStatus); + + NodeHealthStatus nodeHealthStatus = + updateRMNodeFromStatusEvents(rmNode, rmNodeStatusEvent); + + if (nodeHealthStatus.getIsNodeHealthy()) { + nodeState = NodeState.RUNNING; + reportNodeRunning(rmNode, containers); + } else { + nodeState = NodeState.UNHEALTHY; + reportNodeUnusable(rmNode, nodeState); + } + } + List logAggregationReportsForApps = startEvent.getLogAggregationReportsForApps(); if (logAggregationReportsForApps != null && !logAggregationReportsForApps.isEmpty()) { rmNode.handleLogAggregationStatus(logAggregationReportsForApps); } + + return nodeState; } } @@ -1110,6 +1207,22 @@ public static void deactivateNode(RMNodeImpl rmNode, NodeState finalState) { } } + /** + * Report node is RUNNING. + * @param rmNode + * @param containers + */ + public static void reportNodeRunning(RMNodeImpl rmNode, + List containers) { + rmNode.context.getDispatcher().getEventHandler() + .handle(new NodeAddedSchedulerEvent(rmNode, containers)); + rmNode.context.getDispatcher().getEventHandler().handle( + new NodesListManagerEvent( + NodesListManagerEventType.NODE_USABLE, rmNode)); + // Increment activeNodes explicitly because this is a new node. + ClusterMetrics.getMetrics().incrNumActiveNodes(); + } + /** * Report node is UNUSABLE and update metrics. * @param rmNode @@ -1199,6 +1312,7 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) { if (rmNode.originalTotalCapability != null) { rmNode.totalCapability = rmNode.originalTotalCapability; rmNode.originalTotalCapability = null; + rmNode.updatedCapability = true; } LOG.info("Node " + rmNode.nodeId + " in DECOMMISSIONING is " + "recommissioned back to RUNNING."); @@ -1211,6 +1325,13 @@ public void transition(RMNodeImpl rmNode, RMNodeEvent event) { .handle( new NodeResourceUpdateSchedulerEvent(rmNode, ResourceOption .newInstance(rmNode.totalCapability, 0))); + + // Notify NodesListManager to notify all RMApp that this node has been + // recommissioned so that each Application Master can take any required + // actions. 
+ rmNode.context.getDispatcher().getEventHandler().handle( + new NodesListManagerEvent( + NodesListManagerEventType.NODE_USABLE, rmNode)); } } @@ -1227,6 +1348,7 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { statusEvent.getOpportunisticContainersStatus()); NodeHealthStatus remoteNodeHealthStatus = updateRMNodeFromStatusEvents( rmNode, statusEvent); + rmNode.updateClusterUtilizationMetrics(); NodeState initialState = rmNode.getState(); boolean isNodeDecommissioning = initialState.equals(NodeState.DECOMMISSIONING); @@ -1301,6 +1423,7 @@ public NodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { // notifiers get update metadata because they will very likely query it // upon notification // Update metrics + ClusterMetrics.getMetrics().incrNumActiveNodes(); rmNode.updateMetricsForRejoinedNode(NodeState.UNHEALTHY); return NodeState.RUNNING; } @@ -1341,6 +1464,11 @@ public int getQueueSize() { return nodeUpdateQueue.size(); } + // For test only. + @VisibleForTesting + public Map getUpdatedExistContainers() { + return this.updatedExistContainers; + } // For test only. @VisibleForTesting public Set getLaunchedContainers() { @@ -1459,6 +1587,7 @@ private void handleContainerStatus(List containerStatuses) { } else { // A finished container launchedContainers.remove(containerId); + updatedExistContainers.remove(containerId); if (completedContainers.add(containerId)) { newlyCompletedContainers.add(remoteContainer); } @@ -1472,6 +1601,7 @@ private void handleContainerStatus(List containerStatuses) { findLostContainers(numRemoteRunningContainers, containerStatuses); for (ContainerStatus remoteContainer : lostContainers) { ContainerId containerId = remoteContainer.getContainerId(); + updatedExistContainers.remove(containerId); if (completedContainers.add(containerId)) { newlyCompletedContainers.add(remoteContainer); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java index 397699453fb36..2bf04d0fe76fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java @@ -24,19 +24,23 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; public class RMNodeStartedEvent extends RMNodeEvent { + private final NodeStatus nodeStatus; private List containerStatuses; private List runningApplications; private List logAggregationReportsForApps; public RMNodeStartedEvent(NodeId nodeId, List containerReports, - List runningApplications) { + List runningApplications, + NodeStatus nodeStatus) { super(nodeId, RMNodeEventType.STARTED); this.containerStatuses = containerReports; this.runningApplications = runningApplications; + this.nodeStatus = nodeStatus; } public List getNMContainerStatuses() { @@ -47,6 +51,10 @@ public List getRunningApplications() { return 
runningApplications; } + public NodeStatus getNodeStatus() { + return nodeStatus; + } + public List getLogAggregationReportsForApps() { return this.logAggregationReportsForApps; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 70d2714c1409c..ae975fb40c0dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -108,8 +108,8 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; @SuppressWarnings("unchecked") @@ -842,6 +842,11 @@ public void updateNodeResource(RMNode nm, writeLock.lock(); try { SchedulerNode node = getSchedulerNode(nm.getNodeID()); + if (node == null) { + LOG.info("Node: " + nm.getNodeID() + " has already been taken out of " + + "scheduling. Skip updating its resource"); + return; + } Resource newResource = resourceOption.getResource(); final int timeout = resourceOption.getOverCommitTimeout(); Resource oldResource = node.getTotalResource(); @@ -884,6 +889,15 @@ public Set getPlanQueues() throws YarnException { + " does not support reservations"); } + /** + * By default placement constraint is disabled. Schedulers which support + * placement constraint can override this value. 
+ * @return enabled or not + */ + public boolean placementConstraintEnabled() { + return false; + } + protected void refreshMaximumAllocation(Resource newMaxAlloc) { nodeTracker.setConfiguredMaxAllocation(newMaxAlloc); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java index ffbb5874acd64..72147c73865ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Allocation.java @@ -20,7 +20,7 @@ import java.util.List; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NMToken; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index 02bf3767811d5..8e65e6a42e340 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -39,7 +39,6 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -573,7 +572,7 @@ public boolean isPlaceBlacklisted(String resourceName, public ContainerRequest allocate(NodeType type, SchedulerNode node, SchedulerRequestKey schedulerKey, - Container containerAllocated) { + RMContainer containerAllocated) { writeLock.lock(); try { if (null != containerAllocated) { @@ -731,7 +730,7 @@ public boolean checkAllocation(NodeType type, SchedulerNode node, } private void updateMetricsForAllocatedContainer(NodeType type, - SchedulerNode node, Container containerAllocated) { + SchedulerNode node, RMContainer containerAllocated) { QueueMetrics metrics = queue.getMetrics(); if (pending) { // once an allocation is done we assume the application is @@ -744,15 +743,20 @@ private void updateMetricsForAllocatedContainer(NodeType type, } public static void updateMetrics(ApplicationId applicationId, NodeType type, - SchedulerNode node, Container containerAllocated, String user, + SchedulerNode node, RMContainer containerAllocated, String user, Queue queue) { 
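    // The RMContainer (rather than the bare Container) is passed in so the
    // container's node label expression is available here: the allocation is
    // recorded with decrPending=false and the pending resources are then
    // decremented explicitly against that label, keeping the per-partition
    // pending counters consistent.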
LOG.debug("allocate: applicationId={} container={} host={} user={}" - + " resource={} type={}", applicationId, containerAllocated.getId(), - containerAllocated.getNodeId(), user, containerAllocated.getResource(), + + " resource={} type={}", applicationId, + containerAllocated.getContainer().getId(), + containerAllocated.getNodeId(), user, + containerAllocated.getContainer().getResource(), type); if(node != null) { queue.getMetrics().allocateResources(node.getPartition(), user, 1, - containerAllocated.getResource(), true); + containerAllocated.getContainer().getResource(), false); + queue.getMetrics().decrPendingResources( + containerAllocated.getNodeLabelExpression(), user, 1, + containerAllocated.getContainer().getResource()); } queue.getMetrics().incrNodeTypeAggregations(user, type); } @@ -831,4 +835,8 @@ public String getDefaultNodeLabelExpression() { this.readLock.unlock(); } } + + public RMContext getRMContext() { + return this.rmContext; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/CSQueueMetricsForCustomResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/CSQueueMetricsForCustomResources.java index 77fe42bb402be..39fc9759807be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/CSQueueMetricsForCustomResources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/CSQueueMetricsForCustomResources.java @@ -17,6 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; import java.util.Map; @@ -27,10 +28,10 @@ */ public class CSQueueMetricsForCustomResources extends QueueMetricsForCustomResources { - private final QueueMetricsCustomResource guaranteedCapacity = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource maxCapacity = - new QueueMetricsCustomResource(); + private final CustomResourceMetricValue guaranteedCapacity = + new CustomResourceMetricValue(); + private final CustomResourceMetricValue maxCapacity = + new CustomResourceMetricValue(); public void setGuaranteedCapacity(Resource res) { guaranteedCapacity.set(res); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java index 50c45fcce4ef5..ba7420ecb7717 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java @@ -18,8 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; -import com.google.common.annotations.VisibleForTesting; -import 
com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -106,6 +107,7 @@ public void addNode(N node) { // Update cluster capacity Resources.addTo(clusterCapacity, node.getTotalResource()); staleClusterCapacity = Resources.clone(clusterCapacity); + ClusterMetrics.getMetrics().incrCapability(node.getTotalResource()); // Update maximumAllocation updateMaxResources(node, true); @@ -201,6 +203,7 @@ public N removeNode(NodeId nodeId) { // Update cluster capacity Resources.subtractFrom(clusterCapacity, node.getTotalResource()); staleClusterCapacity = Resources.clone(clusterCapacity); + ClusterMetrics.getMetrics().decrCapability(node.getTotalResource()); // Update maximumAllocation updateMaxResources(node, false); @@ -493,4 +496,4 @@ public List getNodesPerPartition(String partition) { } return nodesPerPartition; } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java index ab4fc1eb2456f..799140de6608b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java @@ -161,11 +161,17 @@ private void cancelPreviousRequest(SchedulerNode schedulerNode, // Decrement the pending using a dummy RR with // resource = prev update req capability if (pendingAsk != null && pendingAsk.getCount() > 0) { + Container container = Container.newInstance(UNDEFINED, + schedulerNode.getNodeID(), "host:port", + pendingAsk.getPerAllocationResource(), + schedulerKey.getPriority(), null); appSchedulingInfo.allocate(NodeType.OFF_SWITCH, schedulerNode, - schedulerKey, Container.newInstance(UNDEFINED, - schedulerNode.getNodeID(), "host:port", - pendingAsk.getPerAllocationResource(), - schedulerKey.getPriority(), null)); + schedulerKey, + new RMContainerImpl(container, schedulerKey, + appSchedulingInfo.getApplicationAttemptId(), + schedulerNode.getNodeID(), appSchedulingInfo.getUser(), + appSchedulingInfo.getRMContext(), + appPlacementAllocator.getPrimaryRequestedNodePartition())); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java index 2f73d6b79ec10..0139e3c4796aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/FSQueueMetricsForCustomResources.java @@ -17,6 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; import java.util.Map; @@ -26,20 +27,20 @@ * It provides increase and decrease methods for all types of metrics. */ public class FSQueueMetricsForCustomResources { - private final QueueMetricsCustomResource fairShare = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource steadyFairShare = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource minShare = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource maxShare = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource maxAMShare = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource amResourceUsage = - new QueueMetricsCustomResource(); - - public QueueMetricsCustomResource getFairShare() { + private final CustomResourceMetricValue + fairShare = new CustomResourceMetricValue(); + private final CustomResourceMetricValue steadyFairShare = + new CustomResourceMetricValue(); + private final CustomResourceMetricValue + minShare = new CustomResourceMetricValue(); + private final CustomResourceMetricValue + maxShare = new CustomResourceMetricValue(); + private final CustomResourceMetricValue + maxAMShare = new CustomResourceMetricValue(); + private final CustomResourceMetricValue amResourceUsage = + new CustomResourceMetricValue(); + + public CustomResourceMetricValue getFairShare() { return fairShare; } @@ -51,7 +52,7 @@ public Map getFairShareValues() { return fairShare.getValues(); } - public QueueMetricsCustomResource getSteadyFairShare() { + public CustomResourceMetricValue getSteadyFairShare() { return steadyFairShare; } @@ -63,7 +64,7 @@ public Map getSteadyFairShareValues() { return steadyFairShare.getValues(); } - public QueueMetricsCustomResource getMinShare() { + public CustomResourceMetricValue getMinShare() { return minShare; } @@ -75,7 +76,7 @@ public Map getMinShareValues() { return minShare.getValues(); } - public QueueMetricsCustomResource getMaxShare() { + public CustomResourceMetricValue getMaxShare() { return maxShare; } @@ -87,7 +88,7 @@ public Map getMaxShareValues() { return maxShare.getValues(); } - public QueueMetricsCustomResource getMaxAMShare() { + public CustomResourceMetricValue getMaxAMShare() { return maxAMShare; } @@ -99,7 +100,7 @@ public Map getMaxAMShareValues() { return maxAMShare.getValues(); } - public QueueMetricsCustomResource getAMResourceUsage() { + public CustomResourceMetricValue getAMResourceUsage() { return amResourceUsage; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java new file mode 100644 index 0000000000000..f43131809a0ca --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java @@ -0,0 +1,89 @@ +/** + * Licensed 
to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; + +@Metrics(context = "yarn") +public class PartitionQueueMetrics extends QueueMetrics { + + private String partition; + + protected PartitionQueueMetrics(MetricsSystem ms, String queueName, + Queue parent, boolean enableUserMetrics, Configuration conf, + String partition) { + super(ms, queueName, parent, enableUserMetrics, conf); + this.partition = partition; + if (getParentQueue() != null) { + String newQueueName = (getParentQueue() instanceof CSQueue) + ? ((CSQueue) getParentQueue()).getQueuePath() + : getParentQueue().getQueueName(); + String parentMetricName = + partition + METRIC_NAME_DELIMITER + newQueueName; + setParent(getQueueMetrics().get(parentMetricName)); + } + } + + /** + * Partition * Queue * User Metrics + * + * Computes Metrics at Partition (Node Label) * Queue * User Level. + * + * Sample JMX O/P Structure: + * + * PartitionQueueMetrics (labelX) + * QueueMetrics (A) + * usermetrics + * QueueMetrics (A1) + * usermetrics + * QueueMetrics (A2) + * usermetrics + * QueueMetrics (B) + * usermetrics + * + * @return QueueMetrics + */ + @Override + public synchronized QueueMetrics getUserMetrics(String userName) { + if (users == null) { + return null; + } + + String partitionJMXStr = + (partition.equals(DEFAULT_PARTITION)) ? 
DEFAULT_PARTITION_JMX_STR + : partition; + + QueueMetrics metrics = (PartitionQueueMetrics) users.get(userName); + if (metrics == null) { + metrics = new PartitionQueueMetrics(this.metricsSystem, this.queueName, + null, false, this.conf, this.partition); + users.put(userName, metrics); + metricsSystem.register( + pSourceName(partitionJMXStr).append(qSourceName(queueName)) + .append(",user=").append(userName).toString(), + "Metrics for user '" + userName + "' in queue '" + queueName + "'", + ((PartitionQueueMetrics) metrics.tag(PARTITION_INFO, partitionJMXStr) + .tag(QUEUE_INFO, queueName)).tag(USER_INFO, userName)); + } + return metrics; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java index 6a428b59377ac..1e42e7a01d958 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java @@ -23,9 +23,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Map; -import java.util.Map.Entry; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; @@ -44,15 +42,16 @@ import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Splitter; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; @InterfaceAudience.Private @Metrics(context="yarn") @@ -112,17 +111,34 @@ public class QueueMetrics implements MetricsSource { info("Queue", "Metrics by queue"); protected static final MetricsInfo USER_INFO = info("User", "Metrics by user"); + protected static final MetricsInfo PARTITION_INFO = + info("Partition", "Metrics by partition"); static final Splitter Q_SPLITTER = Splitter.on('.').omitEmptyStrings().trimResults(); protected final MetricsRegistry registry; protected final String queueName; - protected final QueueMetrics parent; + private QueueMetrics parent; + private final Queue parentQueue; protected final MetricsSystem metricsSystem; protected final Map users; protected final Configuration conf; private QueueMetricsForCustomResources queueMetricsForCustomResources; + private final boolean enableUserMetrics; + + protected static final MetricsInfo P_RECORD_INFO = + info("PartitionQueueMetrics", "Metrics for the resource 
scheduler"); + + // Use "default" to operate NO_LABEL (default) partition internally + public static final String DEFAULT_PARTITION = "default"; + + // Use "" to register NO_LABEL (default) partition into metrics system + public static final String DEFAULT_PARTITION_JMX_STR = ""; + + // Metric Name Delimiter + public static final String METRIC_NAME_DELIMITER = "."; + private static final String ALLOCATED_RESOURCE_METRIC_PREFIX = "AllocatedResource."; private static final String ALLOCATED_RESOURCE_METRIC_DESC = @@ -148,13 +164,17 @@ public class QueueMetrics implements MetricsSource { private static final String AGGREGATE_PREEMPTED_SECONDS_METRIC_DESC = "Aggregate Preempted Seconds for NAME"; - protected QueueMetrics(MetricsSystem ms, String queueName, Queue parent, - boolean enableUserMetrics, Configuration conf) { + public QueueMetrics(MetricsSystem ms, String queueName, Queue parent, + boolean enableUserMetrics, Configuration conf) { + registry = new MetricsRegistry(RECORD_INFO); this.queueName = queueName; + this.parent = parent != null ? parent.getMetrics() : null; - this.users = enableUserMetrics ? new HashMap() - : null; + this.parentQueue = parent; + this.users = enableUserMetrics ? new HashMap() : null; + this.enableUserMetrics = enableUserMetrics; + metricsSystem = ms; this.conf = conf; runningTime = buildBuckets(conf); @@ -176,12 +196,25 @@ protected static StringBuilder sourceName(String queueName) { return sb; } - public synchronized - static QueueMetrics forQueue(String queueName, Queue parent, - boolean enableUserMetrics, - Configuration conf) { + static StringBuilder pSourceName(String partition) { + StringBuilder sb = new StringBuilder(P_RECORD_INFO.name()); + sb.append(",partition").append('=').append(partition); + return sb; + } + + static StringBuilder qSourceName(String queueName) { + StringBuilder sb = new StringBuilder(); + int i = 0; + for (String node : Q_SPLITTER.split(queueName)) { + sb.append(",q").append(i++).append('=').append(node); + } + return sb; + } + + public synchronized static QueueMetrics forQueue(String queueName, + Queue parent, boolean enableUserMetrics, Configuration conf) { return forQueue(DefaultMetricsSystem.instance(), queueName, parent, - enableUserMetrics, conf); + enableUserMetrics, conf); } /** @@ -203,28 +236,24 @@ public synchronized static void clearQueueMetrics() { * * @return A string to {@link QueueMetrics} map. */ - protected static Map getQueueMetrics() { + public static Map getQueueMetrics() { return QUEUE_METRICS; } - public synchronized - static QueueMetrics forQueue(MetricsSystem ms, String queueName, - Queue parent, boolean enableUserMetrics, - Configuration conf) { - QueueMetrics metrics = QUEUE_METRICS.get(queueName); + public synchronized static QueueMetrics forQueue(MetricsSystem ms, + String queueName, Queue parent, boolean enableUserMetrics, + Configuration conf) { + QueueMetrics metrics = getQueueMetrics().get(queueName); if (metrics == null) { - metrics = - new QueueMetrics(ms, queueName, parent, enableUserMetrics, conf). 
- tag(QUEUE_INFO, queueName); - + metrics = new QueueMetrics(ms, queueName, parent, enableUserMetrics, conf) + .tag(QUEUE_INFO, queueName); + // Register with the MetricsSystems if (ms != null) { - metrics = - ms.register( - sourceName(queueName).toString(), - "Metrics for queue: " + queueName, metrics); + metrics = ms.register(sourceName(queueName).toString(), + "Metrics for queue: " + queueName, metrics); } - QUEUE_METRICS.put(queueName, metrics); + getQueueMetrics().put(queueName, metrics); } return metrics; @@ -236,7 +265,8 @@ public synchronized QueueMetrics getUserMetrics(String userName) { } QueueMetrics metrics = users.get(userName); if (metrics == null) { - metrics = new QueueMetrics(metricsSystem, queueName, null, false, conf); + metrics = + new QueueMetrics(metricsSystem, queueName, null, false, conf); users.put(userName, metrics); metricsSystem.register( sourceName(queueName).append(",user=").append(userName).toString(), @@ -246,6 +276,96 @@ public synchronized QueueMetrics getUserMetrics(String userName) { return metrics; } + /** + * Partition * Queue Metrics + * + * Computes Metrics at Partition (Node Label) * Queue Level. + * + * Sample JMX O/P Structure: + * + * PartitionQueueMetrics (labelX) + * QueueMetrics (A) + * metrics + * QueueMetrics (A1) + * metrics + * QueueMetrics (A2) + * metrics + * QueueMetrics (B) + * metrics + * + * @param partition + * @return QueueMetrics + */ + public synchronized QueueMetrics getPartitionQueueMetrics(String partition) { + + String partitionJMXStr = partition; + + if ((partition == null) + || (partition.equals(RMNodeLabelsManager.NO_LABEL))) { + partition = DEFAULT_PARTITION; + partitionJMXStr = DEFAULT_PARTITION_JMX_STR; + } + + String metricName = partition + METRIC_NAME_DELIMITER + this.queueName; + QueueMetrics metrics = getQueueMetrics().get(metricName); + + if (metrics == null) { + QueueMetrics queueMetrics = + new PartitionQueueMetrics(metricsSystem, this.queueName, parentQueue, + this.enableUserMetrics, this.conf, partition); + metricsSystem.register( + pSourceName(partitionJMXStr).append(qSourceName(this.queueName)) + .toString(), + "Metrics for queue: " + this.queueName, + queueMetrics.tag(PARTITION_INFO, partitionJMXStr).tag(QUEUE_INFO, + this.queueName)); + getQueueMetrics().put(metricName, queueMetrics); + return queueMetrics; + } else { + return metrics; + } + } + + /** + * Partition Metrics + * + * Computes Metrics at Partition (Node Label) Level. 
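   * Each partition-level source is registered once under the JMX name
   * "PartitionQueueMetrics,partition=LABEL" (the default partition uses an
   * empty partition string) and cached in the shared queue-metrics map under
   * the key "LABEL.", so later lookups reuse the same instance.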
+ * + * Sample JMX O/P Structure: + * + * PartitionQueueMetrics (labelX) + * metrics + * + * @param partition + * @return QueueMetrics + */ + private QueueMetrics getPartitionMetrics(String partition) { + + String partitionJMXStr = partition; + if ((partition == null) + || (partition.equals(RMNodeLabelsManager.NO_LABEL))) { + partition = DEFAULT_PARTITION; + partitionJMXStr = DEFAULT_PARTITION_JMX_STR; + } + + String metricName = partition + METRIC_NAME_DELIMITER; + QueueMetrics metrics = getQueueMetrics().get(metricName); + if (metrics == null) { + metrics = new PartitionQueueMetrics(metricsSystem, this.queueName, null, + false, this.conf, partition); + + // Register with the MetricsSystems + if (metricsSystem != null) { + metricsSystem.register(pSourceName(partitionJMXStr).toString(), + "Metrics for partition: " + partitionJMXStr, + (PartitionQueueMetrics) metrics.tag(PARTITION_INFO, + partitionJMXStr)); + } + getQueueMetrics().put(metricName, metrics); + } + return metrics; + } + private ArrayList parseInts(String value) { ArrayList result = new ArrayList(); for(String s: value.split(",")) { @@ -386,20 +506,42 @@ public void moveAppTo(AppSchedulingInfo app) { */ public void setAvailableResourcesToQueue(String partition, Resource limit) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - availableMB.set(limit.getMemorySize()); - availableVCores.set(limit.getVirtualCores()); - if (queueMetricsForCustomResources != null) { - queueMetricsForCustomResources.setAvailable(limit); - registerCustomResources( - queueMetricsForCustomResources.getAvailableValues(), - AVAILABLE_RESOURCE_METRIC_PREFIX, AVAILABLE_RESOURCE_METRIC_DESC); + setAvailableResources(limit); + } + + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.setAvailableResources(limit); + + if(this.queueName.equals("root")) { + QueueMetrics partitionMetrics = getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.setAvailableResources(limit); + } } } } + /** + * Set Available resources with support for resource vectors. + * + * @param limit + */ + public void setAvailableResources(Resource limit) { + availableMB.set(limit.getMemorySize()); + availableVCores.set(limit.getVirtualCores()); + if (queueMetricsForCustomResources != null) { + queueMetricsForCustomResources.setAvailable(limit); + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getAvailableValues(), registry, + AVAILABLE_RESOURCE_METRIC_PREFIX, AVAILABLE_RESOURCE_METRIC_DESC); + } + } + /** * Set available resources. To be called by scheduler periodically as * resources become available. + * * @param limit resource limit */ public void setAvailableResourcesToQueue(Resource limit) { @@ -409,53 +551,71 @@ public void setAvailableResourcesToQueue(Resource limit) { /** * Set available resources. To be called by scheduler periodically as * resources become available. 
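   * Besides the default (no-label) user metrics, the limit is also mirrored
   * into the matching partition queue's user metrics, so per-label available
   * resources are reported for each user.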
+ * * @param partition Node Partition * @param user * @param limit resource limit */ - public void setAvailableResourcesToUser(String partition, - String user, Resource limit) { - if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { + public void setAvailableResourcesToUser(String partition, String user, + Resource limit) { + if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { - userMetrics.setAvailableResourcesToQueue(partition, limit); + userMetrics.setAvailableResources(limit); + } + } + + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + QueueMetrics partitionUserMetrics = + partitionQueueMetrics.getUserMetrics(user); + if (partitionUserMetrics != null) { + partitionUserMetrics.setAvailableResources(limit); } } } /** * Increment pending resource metrics + * * @param partition Node Partition * @param user * @param containers - * @param res the TOTAL delta of resources note this is different from - * the other APIs which use per container resource + * @param res the TOTAL delta of resources note this is different from the + * other APIs which use per container resource */ public void incrPendingResources(String partition, String user, int containers, Resource res) { + if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - _incrPendingResources(containers, res); - QueueMetrics userMetrics = getUserMetrics(user); - if (userMetrics != null) { - userMetrics.incrPendingResources(partition, user, containers, res); - } - if (parent != null) { - parent.incrPendingResources(partition, user, containers, res); + internalIncrPendingResources(partition, user, containers, res); + } + + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.internalIncrPendingResources(partition, user, + containers, res); + QueueMetrics partitionMetrics = getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.incrementPendingResources(containers, res); } } } - protected Map initAndGetCustomResources() { - Map customResources = new HashMap(); - ResourceInformation[] resources = ResourceUtils.getResourceTypesArray(); - - for (int i = 2; i < resources.length; i++) { - ResourceInformation resource = resources[i]; - customResources.put(resource.getName(), Long.valueOf(0)); + public void internalIncrPendingResources(String partition, String user, + int containers, Resource res) { + incrementPendingResources(containers, res); + QueueMetrics userMetrics = getUserMetrics(user); + if (userMetrics != null) { + userMetrics.internalIncrPendingResources(partition, user, containers, + res); + } + if (parent != null) { + parent.internalIncrPendingResources(partition, user, containers, res); } - return customResources; } + protected void createQueueMetricsForCustomResources() { if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { this.queueMetricsForCustomResources = @@ -464,77 +624,74 @@ protected void createQueueMetricsForCustomResources() { } } - /** - * Register all custom resources metrics as part of initialization. 
As and - * when this metric object construction happens for any queue, all custom - * resource metrics value would be initialized with '0' like any other - * mandatory resources metrics - */ protected void registerCustomResources() { - Map customResources = initAndGetCustomResources(); - registerCustomResources(customResources, ALLOCATED_RESOURCE_METRIC_PREFIX, - ALLOCATED_RESOURCE_METRIC_DESC); - registerCustomResources(customResources, AVAILABLE_RESOURCE_METRIC_PREFIX, - AVAILABLE_RESOURCE_METRIC_DESC); - registerCustomResources(customResources, PENDING_RESOURCE_METRIC_PREFIX, - PENDING_RESOURCE_METRIC_DESC); - registerCustomResources(customResources, RESERVED_RESOURCE_METRIC_PREFIX, - RESERVED_RESOURCE_METRIC_DESC); - registerCustomResources(customResources, - AGGREGATE_PREEMPTED_SECONDS_METRIC_PREFIX, - AGGREGATE_PREEMPTED_SECONDS_METRIC_DESC); - } - - protected void registerCustomResources(Map customResources, - String metricPrefix, String metricDesc) { - for (Entry entry : customResources.entrySet()) { - String resourceName = entry.getKey(); - Long resourceValue = entry.getValue(); - - MutableGaugeLong resourceMetric = - (MutableGaugeLong) this.registry.get(metricPrefix + resourceName); - - if (resourceMetric == null) { - resourceMetric = - this.registry.newGauge(metricPrefix + resourceName, - metricDesc.replace("NAME", resourceName), 0L); - } - resourceMetric.set(resourceValue); - } + Map customResources = + queueMetricsForCustomResources.initAndGetCustomResources(); + queueMetricsForCustomResources + .registerCustomResources(customResources, this.registry); + queueMetricsForCustomResources + .registerCustomResources(customResources, this.registry, + PENDING_RESOURCE_METRIC_PREFIX, PENDING_RESOURCE_METRIC_DESC); + queueMetricsForCustomResources + .registerCustomResources(customResources, this.registry, + RESERVED_RESOURCE_METRIC_PREFIX, RESERVED_RESOURCE_METRIC_DESC); + queueMetricsForCustomResources + .registerCustomResources(customResources, this.registry, + AGGREGATE_PREEMPTED_SECONDS_METRIC_PREFIX, + AGGREGATE_PREEMPTED_SECONDS_METRIC_DESC); } - private void _incrPendingResources(int containers, Resource res) { + private void incrementPendingResources(int containers, Resource res) { pendingContainers.incr(containers); pendingMB.incr(res.getMemorySize() * containers); pendingVCores.incr(res.getVirtualCores() * containers); if (queueMetricsForCustomResources != null) { queueMetricsForCustomResources.increasePending(res, containers); - registerCustomResources(queueMetricsForCustomResources.getPendingValues(), + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getPendingValues(), this.registry, PENDING_RESOURCE_METRIC_PREFIX, PENDING_RESOURCE_METRIC_DESC); } } public void decrPendingResources(String partition, String user, int containers, Resource res) { + if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - _decrPendingResources(containers, res); - QueueMetrics userMetrics = getUserMetrics(user); - if (userMetrics != null) { - userMetrics.decrPendingResources(partition, user, containers, res); - } - if (parent != null) { - parent.decrPendingResources(partition, user, containers, res); + internalDecrPendingResources(partition, user, containers, res); + } + + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.internalDecrPendingResources(partition, user, + containers, res); + QueueMetrics partitionMetrics = 
getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.decrementPendingResources(containers, res); } } } - private void _decrPendingResources(int containers, Resource res) { + protected void internalDecrPendingResources(String partition, String user, + int containers, Resource res) { + decrementPendingResources(containers, res); + QueueMetrics userMetrics = getUserMetrics(user); + if (userMetrics != null) { + userMetrics.internalDecrPendingResources(partition, user, containers, + res); + } + if (parent != null) { + parent.internalDecrPendingResources(partition, user, containers, res); + } + } + + private void decrementPendingResources(int containers, Resource res) { pendingContainers.decr(containers); pendingMB.decr(res.getMemorySize() * containers); pendingVCores.decr(res.getVirtualCores() * containers); if (queueMetricsForCustomResources != null) { queueMetricsForCustomResources.decreasePending(res, containers); - registerCustomResources(queueMetricsForCustomResources.getPendingValues(), + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getPendingValues(), this.registry, PENDING_RESOURCE_METRIC_PREFIX, PENDING_RESOURCE_METRIC_DESC); } } @@ -558,35 +715,62 @@ public void incrNodeTypeAggregations(String user, NodeType type) { } } - public void allocateResources(String partition, String user, - int containers, Resource res, boolean decrPending) { + public void allocateResources(String partition, String user, int containers, + Resource res, boolean decrPending) { + if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - allocatedContainers.incr(containers); - aggregateContainersAllocated.incr(containers); - - allocatedMB.incr(res.getMemorySize() * containers); - allocatedVCores.incr(res.getVirtualCores() * containers); - if (queueMetricsForCustomResources != null) { - queueMetricsForCustomResources.increaseAllocated(res, containers); - registerCustomResources( - queueMetricsForCustomResources.getAllocatedValues(), - ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); - } + internalAllocateResources(partition, user, containers, res, decrPending); + } - if (decrPending) { - _decrPendingResources(containers, res); - } - QueueMetrics userMetrics = getUserMetrics(user); - if (userMetrics != null) { - userMetrics.allocateResources(partition, user, - containers, res, decrPending); - } - if (parent != null) { - parent.allocateResources(partition, user, containers, res, decrPending); + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.internalAllocateResources(partition, user, + containers, res, decrPending); + QueueMetrics partitionMetrics = getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.computeAllocateResources(containers, res, decrPending); } } } + public void internalAllocateResources(String partition, String user, + int containers, Resource res, boolean decrPending) { + computeAllocateResources(containers, res, decrPending); + QueueMetrics userMetrics = getUserMetrics(user); + if (userMetrics != null) { + userMetrics.internalAllocateResources(partition, user, containers, res, + decrPending); + } + if (parent != null) { + parent.internalAllocateResources(partition, user, containers, res, + decrPending); + } + } + + /** + * Allocate Resources for a partition with support for resource vectors. 
+ * + * @param containers number of containers + * @param res resource containing memory size, vcores etc + * @param decrPending decides whether to decrease pending resource or not + */ + private void computeAllocateResources(int containers, Resource res, + boolean decrPending) { + allocatedContainers.incr(containers); + aggregateContainersAllocated.incr(containers); + allocatedMB.incr(res.getMemorySize() * containers); + allocatedVCores.incr(res.getVirtualCores() * containers); + if (queueMetricsForCustomResources != null) { + queueMetricsForCustomResources.increaseAllocated(res, containers); + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getAllocatedValues(), this.registry, + ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); + } + if (decrPending) { + decrementPendingResources(containers, res); + } + } + /** * Allocate Resource for container size change. * @param partition Node Partition @@ -594,81 +778,80 @@ public void allocateResources(String partition, String user, * @param res */ public void allocateResources(String partition, String user, Resource res) { - if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - allocatedMB.incr(res.getMemorySize()); - allocatedVCores.incr(res.getVirtualCores()); - if (queueMetricsForCustomResources != null) { - queueMetricsForCustomResources.increaseAllocated(res); - registerCustomResources( - queueMetricsForCustomResources.getAllocatedValues(), - ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); - } + allocatedMB.incr(res.getMemorySize()); + allocatedVCores.incr(res.getVirtualCores()); + if (queueMetricsForCustomResources != null) { + queueMetricsForCustomResources.increaseAllocated(res); + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getAllocatedValues(), this.registry, + ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); + } - pendingMB.decr(res.getMemorySize()); - pendingVCores.decr(res.getVirtualCores()); - if (queueMetricsForCustomResources != null) { - queueMetricsForCustomResources.decreasePending(res); - registerCustomResources( - queueMetricsForCustomResources.getPendingValues(), - PENDING_RESOURCE_METRIC_PREFIX, PENDING_RESOURCE_METRIC_DESC); - } + pendingMB.decr(res.getMemorySize()); + pendingVCores.decr(res.getVirtualCores()); + if (queueMetricsForCustomResources != null) { + queueMetricsForCustomResources.decreasePending(res); + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getPendingValues(), this.registry, + PENDING_RESOURCE_METRIC_PREFIX, PENDING_RESOURCE_METRIC_DESC); + } - QueueMetrics userMetrics = getUserMetrics(user); - if (userMetrics != null) { - userMetrics.allocateResources(partition, user, res); - } - if (parent != null) { - parent.allocateResources(partition, user, res); - } + QueueMetrics userMetrics = getUserMetrics(user); + if (userMetrics != null) { + userMetrics.allocateResources(partition, user, res); + } + if (parent != null) { + parent.allocateResources(partition, user, res); } } - public void releaseResources(String partition, - String user, int containers, Resource res) { + public void releaseResources(String partition, String user, int containers, + Resource res) { + if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - allocatedContainers.decr(containers); - aggregateContainersReleased.incr(containers); - allocatedMB.decr(res.getMemorySize() * containers); - 
allocatedVCores.decr(res.getVirtualCores() * containers); - if (queueMetricsForCustomResources != null) { - queueMetricsForCustomResources.decreaseAllocated(res, containers); - registerCustomResources( - queueMetricsForCustomResources.getAllocatedValues(), - ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); - } + internalReleaseResources(partition, user, containers, res); + } - QueueMetrics userMetrics = getUserMetrics(user); - if (userMetrics != null) { - userMetrics.releaseResources(partition, user, containers, res); - } - if (parent != null) { - parent.releaseResources(partition, user, containers, res); + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.internalReleaseResources(partition, user, + containers, res); + QueueMetrics partitionMetrics = getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.computeReleaseResources(containers, res); } } } - /** - * Release Resource for container size change. - * - * @param user - * @param res - */ - private void releaseResources(String user, Resource res) { - allocatedMB.decr(res.getMemorySize()); - allocatedVCores.decr(res.getVirtualCores()); - if (queueMetricsForCustomResources != null) { - queueMetricsForCustomResources.decreaseAllocated(res); - registerCustomResources( - queueMetricsForCustomResources.getAllocatedValues(), - ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); - } + public void internalReleaseResources(String partition, String user, + int containers, Resource res) { + computeReleaseResources(containers, res); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { - userMetrics.releaseResources(user, res); + userMetrics.internalReleaseResources(partition, user, containers, res); } if (parent != null) { - parent.releaseResources(user, res); + parent.internalReleaseResources(partition, user, containers, res); + } + } + + /** + * Release Resources for a partition with support for resource vectors. 
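Throughout these hunks the per-resource gauges are now registered via QueueMetricsForCustomResources, with the metrics registry passed in explicitly. A minimal sketch of that "create the gauge on first sight of a resource name, then set it" pattern, using plain Java stand-ins (an AtomicLong instead of MutableGaugeLong, a HashMap instead of the Hadoop metrics registry; names are illustrative only):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicLong;

    class CustomResourceGaugeSketch {
      // Plays the role of the metrics registry keyed by "prefix + resource name".
      private final Map<String, AtomicLong> registry = new HashMap<>();

      void registerCustomResources(Map<String, Long> customResources,
          String metricPrefix) {
        for (Map.Entry<String, Long> entry : customResources.entrySet()) {
          // Create the gauge lazily the first time a resource name appears,
          // then update it to the latest value.
          registry.computeIfAbsent(metricPrefix + entry.getKey(),
              k -> new AtomicLong()).set(entry.getValue());
        }
      }

      long value(String name) {
        return registry.get(name).get();
      }

      public static void main(String[] args) {
        CustomResourceGaugeSketch sketch = new CustomResourceGaugeSketch();
        Map<String, Long> pending = new HashMap<>();
        pending.put("yarn.io/gpu", 4L);
        sketch.registerCustomResources(pending, "PendingResource.");
        System.out.println(sketch.value("PendingResource.yarn.io/gpu")); // 4
      }
    }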
+ * + * @param containers number of containers + * @param res resource containing memory size, vcores etc + */ + private void computeReleaseResources(int containers, Resource res) { + allocatedContainers.decr(containers); + aggregateContainersReleased.incr(containers); + allocatedMB.decr(res.getMemorySize() * containers); + allocatedVCores.decr(res.getVirtualCores() * containers); + if (queueMetricsForCustomResources != null) { + queueMetricsForCustomResources.decreaseAllocated(res, containers); + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getAllocatedValues(), this.registry, + ALLOCATED_RESOURCE_METRIC_PREFIX, ALLOCATED_RESOURCE_METRIC_DESC); } } @@ -715,9 +898,9 @@ public void updatePreemptedSecondsForCustomResources(Resource res, if (queueMetricsForCustomResources != null) { queueMetricsForCustomResources .increaseAggregatedPreemptedSeconds(res, seconds); - registerCustomResources( + queueMetricsForCustomResources.registerCustomResources( queueMetricsForCustomResources.getAggregatePreemptedSeconds() - .getValues(), + .getValues(), this.registry, AGGREGATE_PREEMPTED_SECONDS_METRIC_PREFIX, AGGREGATE_PREEMPTED_SECONDS_METRIC_DESC); } @@ -728,52 +911,79 @@ public void updatePreemptedSecondsForCustomResources(Resource res, public void reserveResource(String partition, String user, Resource res) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - reserveResource(user, res); + internalReserveResources(partition, user, res); + } + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.internalReserveResources(partition, user, res); + QueueMetrics partitionMetrics = getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.incrReserveResources(res); + } + } + } + + protected void internalReserveResources(String partition, String user, + Resource res) { + incrReserveResources(res); + QueueMetrics userMetrics = getUserMetrics(user); + if (userMetrics != null) { + userMetrics.internalReserveResources(partition, user, res); + } + if (parent != null) { + parent.internalReserveResources(partition, user, res); } } - public void reserveResource(String user, Resource res) { + public void incrReserveResources(Resource res) { reservedContainers.incr(); reservedMB.incr(res.getMemorySize()); reservedVCores.incr(res.getVirtualCores()); if (queueMetricsForCustomResources != null) { queueMetricsForCustomResources.increaseReserved(res); - registerCustomResources( - queueMetricsForCustomResources.getReservedValues(), + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getReservedValues(), this.registry, RESERVED_RESOURCE_METRIC_PREFIX, RESERVED_RESOURCE_METRIC_DESC); } + } + + public void unreserveResource(String partition, String user, Resource res) { + if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { + internalUnReserveResources(partition, user, res); + } + QueueMetrics partitionQueueMetrics = getPartitionQueueMetrics(partition); + if (partitionQueueMetrics != null) { + partitionQueueMetrics.internalUnReserveResources(partition, user, res); + QueueMetrics partitionMetrics = getPartitionMetrics(partition); + if (partitionMetrics != null) { + partitionMetrics.decrReserveResource(res); + } + } + } + + protected void internalUnReserveResources(String partition, String user, + Resource res) { + decrReserveResource(res); QueueMetrics userMetrics = getUserMetrics(user); if 
(userMetrics != null) { - userMetrics.reserveResource(user, res); + userMetrics.internalUnReserveResources(partition, user, res); } if (parent != null) { - parent.reserveResource(user, res); + parent.internalUnReserveResources(partition, user, res); } } - private void unreserveResource(String user, Resource res) { - reservedContainers.decr(); + public void decrReserveResource(Resource res) { + int containers = 1; + reservedContainers.decr(containers); reservedMB.decr(res.getMemorySize()); reservedVCores.decr(res.getVirtualCores()); if (queueMetricsForCustomResources != null) { queueMetricsForCustomResources.decreaseReserved(res); - registerCustomResources( - queueMetricsForCustomResources.getReservedValues(), + queueMetricsForCustomResources.registerCustomResources( + queueMetricsForCustomResources.getReservedValues(), this.registry, RESERVED_RESOURCE_METRIC_PREFIX, RESERVED_RESOURCE_METRIC_DESC); } - QueueMetrics userMetrics = getUserMetrics(user); - if (userMetrics != null) { - userMetrics.unreserveResource(user, res); - } - if (parent != null) { - parent.unreserveResource(user, res); - } - } - - public void unreserveResource(String partition, String user, Resource res) { - if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { - unreserveResource(user, res); - } } public void incrActiveUsers() { @@ -874,7 +1084,7 @@ public Resource getReservedResources() { * @return QueueMetricsCustomResource */ @VisibleForTesting - public QueueMetricsCustomResource getAggregatedPreemptedSecondsResources() { + public CustomResourceMetricValue getAggregatedPreemptedSecondsResources() { return queueMetricsForCustomResources.getAggregatePreemptedSeconds(); } @@ -992,7 +1202,7 @@ public long getAggregatePreemptedContainers() { public void fillInValuesFromAvailableResources(Resource fromResource, Resource targetResource) { if (queueMetricsForCustomResources != null) { - QueueMetricsCustomResource availableResources = + CustomResourceMetricValue availableResources = queueMetricsForCustomResources.getAvailable(); // We expect all custom resources contained in availableResources, @@ -1017,8 +1227,16 @@ public QueueMetricsForCustomResources getQueueMetricsForCustomResources() { return this.queueMetricsForCustomResources; } - public void setQueueMetricsForCustomResources( + protected void setQueueMetricsForCustomResources( QueueMetricsForCustomResources metrics) { this.queueMetricsForCustomResources = metrics; } -} + + public void setParent(QueueMetrics parent) { + this.parent = parent; + } + + public Queue getParentQueue() { + return parentQueue; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java index ddfffc8d13bc6..cd970a7eb12ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetricsForCustomResources.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at + * * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,28 +19,20 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.metrics.CustomResourceMetrics; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; import java.util.Map; -/** - * This class is a main entry-point for any kind of metrics for - * custom resources. - * It provides increase and decrease methods for all types of metrics. - */ -public class QueueMetricsForCustomResources { - private final QueueMetricsCustomResource aggregatePreemptedSeconds = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource aggregatePreempted = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource allocated = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource available = - new QueueMetricsCustomResource(); - private final QueueMetricsCustomResource pending = - new QueueMetricsCustomResource(); - - private final QueueMetricsCustomResource reserved = - new QueueMetricsCustomResource(); +public class QueueMetricsForCustomResources extends CustomResourceMetrics { + private final CustomResourceMetricValue aggregatePreemptedSeconds = + new CustomResourceMetricValue(); + private final CustomResourceMetricValue aggregatePreempted = + new CustomResourceMetricValue(); + private final CustomResourceMetricValue pending = + new CustomResourceMetricValue(); + private final CustomResourceMetricValue reserved = + new CustomResourceMetricValue(); public void increaseReserved(Resource res) { reserved.increase(res); @@ -48,10 +42,6 @@ public void decreaseReserved(Resource res) { reserved.decrease(res); } - public void setAvailable(Resource res) { - available.set(res); - } - public void increasePending(Resource res, int containers) { pending.increaseWithMultiplier(res, containers); } @@ -64,20 +54,12 @@ public void decreasePending(Resource res, int containers) { pending.decreaseWithMultiplier(res, containers); } - public void increaseAllocated(Resource res) { - allocated.increase(res); - } - - public void increaseAllocated(Resource res, int containers) { - allocated.increaseWithMultiplier(res, containers); - } - - public void decreaseAllocated(Resource res) { - allocated.decrease(res); + public Map getPendingValues() { + return pending.getValues(); } - public void decreaseAllocated(Resource res, int containers) { - allocated.decreaseWithMultiplier(res, containers); + public Map getReservedValues() { + return reserved.getValues(); } public void increaseAggregatedPreemptedSeconds(Resource res, long seconds) { @@ -88,27 +70,7 @@ public void increaseAggregatedPreempted(Resource res) { aggregatePreempted.increase(res); } - Map getAllocatedValues() { - return allocated.getValues(); - } - - Map getAvailableValues() { - return available.getValues(); - } - - Map getPendingValues() { - return pending.getValues(); - } - - Map getReservedValues() { - return reserved.getValues(); - } - - 
QueueMetricsCustomResource getAggregatePreemptedSeconds() { + CustomResourceMetricValue getAggregatePreemptedSeconds() { return aggregatePreemptedSeconds; } - - public QueueMetricsCustomResource getAvailable() { - return available; - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java index c46c9112ea8fd..18fd6c3567dc0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java @@ -200,6 +200,10 @@ public Resource getAllUsed() { return _getAll(ResourceType.USED); } + public Resource getAllReserved() { + return _getAll(ResourceType.RESERVED); + } + // Cache Used public Resource getCachedUsed() { return _get(NL, ResourceType.CACHED_USED); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 657c03cfd1541..649fabd796a9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -90,9 +90,9 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ConcurrentHashMultiset; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ConcurrentHashMultiset; /** * Represents an application attempt from the viewpoint of the scheduler. 
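The refactor above trims QueueMetricsForCustomResources down to map-backed value holders (pending, reserved, aggregate preempted) and moves the shared pieces into a CustomResourceMetrics base class. Below is a rough, self-contained sketch of the kind of per-resource accumulator these holders represent; the class name and method signatures are illustrative and not the actual CustomResourceMetricValue API:

    import java.util.HashMap;
    import java.util.Map;

    class ResourceValueSketch {
      private final Map<String, Long> values = new HashMap<>();

      // Add each custom resource of a request, scaled by the container count.
      void increaseWithMultiplier(Map<String, Long> res, int containers) {
        for (Map.Entry<String, Long> e : res.entrySet()) {
          values.merge(e.getKey(), e.getValue() * containers, Long::sum);
        }
      }

      void decreaseWithMultiplier(Map<String, Long> res, int containers) {
        for (Map.Entry<String, Long> e : res.entrySet()) {
          values.merge(e.getKey(), -e.getValue() * containers, Long::sum);
        }
      }

      Map<String, Long> getValues() {
        return values;
      }

      public static void main(String[] args) {
        ResourceValueSketch pending = new ResourceValueSketch();
        Map<String, Long> ask = Map.of("yarn.io/gpu", 2L);
        pending.increaseWithMultiplier(ask, 3);   // 3 containers x 2 GPUs
        pending.decreaseWithMultiplier(ask, 1);   // 1 container released
        System.out.println(pending.getValues());  // {yarn.io/gpu=4}
      }
    }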
@@ -1126,8 +1126,8 @@ public ApplicationResourceUsageReport getResourceUsageReport() { getRunningAggregateAppResourceUsage(); Resource usedResourceClone = Resources.clone( attemptResourceUsage.getAllUsed()); - Resource reservedResourceClone = Resources.clone( - attemptResourceUsage.getReserved()); + Resource reservedResourceClone = + Resources.clone(attemptResourceUsage.getAllReserved()); Resource cluster = rmContext.getScheduler().getClusterResource(); ResourceCalculator calc = rmContext.getScheduler().getResourceCalculator(); @@ -1138,7 +1138,7 @@ public ApplicationResourceUsageReport getResourceUsageReport() { .put(ResourceInformation.VCORES.getName(), 0L); float queueUsagePerc = 0.0f; float clusterUsagePerc = 0.0f; - if (!calc.isInvalidDivisor(cluster)) { + if (!calc.isAllInvalidDivisor(cluster)) { float queueCapacityPerc = queue.getQueueInfo(false, false) .getCapacity(); queueUsagePerc = calc.divide(cluster, usedResourceClone, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index ef03aadf1a000..468f6217ecd94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -26,7 +26,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.builder.CompareToBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +50,7 @@ import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java index fa71a257343f9..e27fd623ffcce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceUtilization; /** * Node usage report. 
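In getResourceUsageReport() above, reserved resources now come from getAllReserved() and the guard switches to isAllInvalidDivisor() before any percentage is computed. The sketch below reduces that guard-then-divide step to memory-only arithmetic as an illustration; the real code delegates to a ResourceCalculator and works across all resource types:

    class UsagePercentSketch {
      static float queueUsagePercent(long usedMB, long clusterMB,
          float queueCapacityFraction) {
        // Mirrors the invalid-divisor check: skip the division when the
        // cluster (or the queue's share of it) would be a zero divisor.
        if (clusterMB <= 0 || queueCapacityFraction <= 0f) {
          return 0.0f;
        }
        return (float) usedMB / clusterMB / queueCapacityFraction * 100.0f;
      }

      public static void main(String[] args) {
        System.out.println(queueUsagePercent(2048, 8192, 0.5f)); // 50.0
        System.out.println(queueUsagePercent(2048, 0, 0.5f));    // 0.0, guarded
      }
    }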
@@ -30,12 +31,14 @@ public class SchedulerNodeReport { private final Resource used; private final Resource avail; + private final ResourceUtilization utilization; private final int num; public SchedulerNodeReport(SchedulerNode node) { this.used = node.getAllocatedResource(); this.avail = node.getUnallocatedResource(); this.num = node.getNumContainers(); + this.utilization = node.getNodeUtilization(); } /** @@ -58,4 +61,12 @@ public Resource getAvailableResource() { public int getNumContainers() { return num; } + + /** + * + * @return utilization of this node + */ + public ResourceUtilization getUtilization() { + return utilization; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 7ec1c33346b21..58e25979d17cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -23,9 +23,9 @@ import java.util.Map; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java index 5c491383a1727..5f072f703318b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java @@ -51,7 +51,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; /** * This interface is used by the components to talk to the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index cc02ff688ccb8..8d7ec2a09053f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 9177430f52e28..fda5e169e60b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -27,7 +27,7 @@ import java.util.Set; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +74,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.UNDEFINED; @@ -146,6 +146,7 @@ protected enum CapacityConfigType { volatile Priority priority = Priority.newInstance(0); private Map userWeights = new HashMap(); + private int maxParallelApps; public AbstractCSQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { @@ -390,6 +391,11 @@ protected void setupQueueConfigs(Resource clusterResource, // and queue setting setupMaximumAllocation(configuration); + // Max parallel apps + int queueMaxParallelApps = + configuration.getMaxParallelAppsForQueue(getQueuePath()); + setMaxParallelApps(queueMaxParallelApps); + // initialized the queue state based on previous state, configured state // and its parent state. 
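setupQueueConfigs() now reads a per-queue max-parallel-apps value and stores it on the queue via setMaxParallelApps(). A small sketch of that lookup with a cluster-wide fallback; the property keys below are assumptions for illustration and may not match CapacitySchedulerConfiguration exactly:

    import java.util.HashMap;
    import java.util.Map;

    class MaxParallelAppsConfigSketch {
      private static final String PREFIX = "yarn.scheduler.capacity.";
      private static final String SUFFIX = ".max-parallel-apps";
      private final Map<String, String> props = new HashMap<>();

      void set(String key, String value) {
        props.put(key, value);
      }

      // Queue-specific limit if configured, otherwise the supplied default.
      int getMaxParallelAppsForQueue(String queuePath, int defaultValue) {
        String v = props.get(PREFIX + queuePath + SUFFIX);
        return v == null ? defaultValue : Integer.parseInt(v);
      }

      public static void main(String[] args) {
        MaxParallelAppsConfigSketch conf = new MaxParallelAppsConfigSketch();
        conf.set("yarn.scheduler.capacity.root.a.max-parallel-apps", "10");
        // Configured queue gets its own limit, others fall back to the default.
        System.out.println(
            conf.getMaxParallelAppsForQueue("root.a", Integer.MAX_VALUE)); // 10
        System.out.println(
            conf.getMaxParallelAppsForQueue("root.b", Integer.MAX_VALUE)); // 2147483647
      }
    }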
QueueState previous = getState(); @@ -1060,14 +1066,12 @@ boolean canAssignToThisQueue(Resource clusterResource, if (Resources.greaterThanOrEqual(resourceCalculator, clusterResource, usedExceptKillable, currentLimitResource)) { - // if reservation continous looking enabled, check to see if could we + // if reservation continue looking enabled, check to see if could we // potentially use this node instead of a reserved node if the application // has reserved containers. - // TODO, now only consider reservation cases when the node has no label - if (this.reservationsContinueLooking && nodePartition.equals( - RMNodeLabelsManager.NO_LABEL) && Resources.greaterThan( - resourceCalculator, clusterResource, resourceCouldBeUnreserved, - Resources.none())) { + if (this.reservationsContinueLooking + && Resources.greaterThan(resourceCalculator, clusterResource, + resourceCouldBeUnreserved, Resources.none())) { // resource-without-reserved = used - reserved Resource newTotalWithoutReservedResource = Resources.subtract( usedExceptKillable, resourceCouldBeUnreserved); @@ -1349,10 +1353,6 @@ public void activeQueue() throws YarnException { if (getState() == QueueState.RUNNING) { LOG.info("The specified queue:" + getQueuePath() + " is already in the RUNNING state."); - } else if (getState() == QueueState.DRAINING) { - throw new YarnException( - "The queue:" + getQueuePath() + " is in the Stopping process. " - + "Please wait for the queue getting fully STOPPED."); } else { CSQueue parent = getParent(); if (parent == null || parent.getState() == QueueState.RUNNING) { @@ -1425,4 +1425,14 @@ public long getDefaultApplicationLifetime() { public boolean getDefaultAppLifetimeWasSpecifiedInConfig() { return defaultAppLifetimeWasSpecifiedInConfig; } + + public void setMaxParallelApps(int maxParallelApps) { + this.maxParallelApps = maxParallelApps; + } + + public int getMaxParallelApps() { + return maxParallelApps; + } + + abstract int getNumRunnableApps(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java new file mode 100644 index 0000000000000..93d001773138f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSMaxRunningAppsEnforcer.java @@ -0,0 +1,436 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.PriorityQueue; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ListMultimap; + +/** + * Handles tracking and enforcement for user and queue maxRunningApps + * constraints. + */ +public class CSMaxRunningAppsEnforcer { + private static final Logger LOG = LoggerFactory.getLogger( + CSMaxRunningAppsEnforcer.class); + + private final CapacityScheduler scheduler; + + // Tracks the number of running applications by user. + private final Map usersNumRunnableApps; + + private final ListMultimap usersNonRunnableApps; + + public CSMaxRunningAppsEnforcer(CapacityScheduler scheduler) { + this.scheduler = scheduler; + this.usersNumRunnableApps = new HashMap(); + this.usersNonRunnableApps = ArrayListMultimap.create(); + } + + /** + * Checks whether making the application runnable would exceed any + * maxRunningApps limits. Also sets the "runnable" flag on the + * attempt. + * + * @param attempt the app attempt being checked + * @return true if the application is runnable; false otherwise + */ + public boolean checkRunnabilityWithUpdate( + FiCaSchedulerApp attempt) { + boolean attemptCanRun = !exceedUserMaxParallelApps(attempt.getUser()) + && !exceedQueueMaxParallelApps(attempt.getCSLeafQueue()); + + attempt.setRunnable(attemptCanRun); + + return attemptCanRun; + } + + /** + * Checks whether the number of user runnable apps exceeds the limitation. + * + * @param user the user name + * @return true if the number hits the limit; false otherwise + */ + private boolean exceedUserMaxParallelApps(String user) { + Integer userNumRunnable = usersNumRunnableApps.get(user); + if (userNumRunnable == null) { + userNumRunnable = 0; + } + if (userNumRunnable >= getUserMaxParallelApps(user)) { + LOG.info("Maximum runnable apps exceeded for user {}", user); + return true; + } + + return false; + } + + /** + * Recursively checks whether the number of queue runnable apps exceeds the + * limitation. + * + * @param queue the current queue + * @return true if the number hits the limit; false otherwise + */ + private boolean exceedQueueMaxParallelApps(AbstractCSQueue queue) { + // Check queue and all parent queues + while (queue != null) { + if (queue.getNumRunnableApps() >= queue.getMaxParallelApps()) { + LOG.info("Maximum runnable apps exceeded for queue {}", + queue.getQueuePath()); + return true; + } + queue = (AbstractCSQueue) queue.getParent(); + } + + return false; + } + + public void trackApp(FiCaSchedulerApp app) { + if (app.isRunnable()) { + trackRunnableApp(app); + } else { + trackNonRunnableApp(app); + } + } + /** + * Tracks the given new runnable app for purposes of maintaining max running + * app limits. 
+   */
+  private void trackRunnableApp(FiCaSchedulerApp app) {
+    String user = app.getUser();
+    AbstractCSQueue queue = (AbstractCSQueue) app.getQueue();
+    // Increment running counts for all parent queues
+    ParentQueue parent = (ParentQueue) queue.getParent();
+    while (parent != null) {
+      parent.incrementRunnableApps();
+      parent = (ParentQueue) parent.getParent();
+    }
+
+    Integer userNumRunnable = usersNumRunnableApps.get(user);
+    usersNumRunnableApps.put(user, (userNumRunnable == null ? 0
+        : userNumRunnable) + 1);
+  }
+
+  /**
+   * Tracks the given new non-runnable app so that it can be made runnable
+   * when it would not violate max running app limits.
+   */
+  private void trackNonRunnableApp(FiCaSchedulerApp app) {
+    String user = app.getUser();
+    usersNonRunnableApps.put(user, app);
+  }
+
+  /**
+   * This is called after reloading the allocation configuration when the
+   * scheduler is reinitialized.
+   *
+   * Checks to see whether any non-runnable applications become runnable
+   * now that the max parallel apps limit of a given queue has been changed.
+   *
+   * Runs in O(n) where n is the number of apps that are non-runnable and in
+   * the queues that went from having no slack to having slack.
+   */
+  public void updateRunnabilityOnReload() {
+    ParentQueue rootQueue = (ParentQueue) scheduler.getRootQueue();
+    List<List<FiCaSchedulerApp>> appsNowMaybeRunnable =
+        new ArrayList<List<FiCaSchedulerApp>>();
+
+    gatherPossiblyRunnableAppLists(rootQueue, appsNowMaybeRunnable);
+
+    updateAppsRunnability(appsNowMaybeRunnable, Integer.MAX_VALUE);
+  }
+
+  /**
+   * Checks to see whether any other applications are runnable now that the
+   * given application has been removed from the given queue, and makes them
+   * so.
+   *
+   * Runs in O(n log(n)) where n is the number of queues that are under the
+   * highest queue that went from having no slack to having slack.
+   */
+  public void updateRunnabilityOnAppRemoval(FiCaSchedulerApp app) {
+    // childqueueX might have no pending apps itself, but if a queue higher up
+    // in the hierarchy parentqueueY has a maxRunningApps set, an app completion
+    // in childqueueX could allow an app in some other distant child of
+    // parentqueueY to become runnable.
+    // An app removal will only possibly allow another app to become runnable if
+    // the queue was already at its max before the removal.
+    // Thus we find the highest ancestor queue in the tree that was at its
+    // maxRunningApps limit before the removal.
+    LeafQueue queue = app.getCSLeafQueue();
+    AbstractCSQueue highestQueueWithAppsNowRunnable =
+        (queue.getNumRunnableApps() == queue.getMaxParallelApps() - 1)
+            ?
queue : null; + + ParentQueue parent = (ParentQueue) queue.getParent(); + while (parent != null) { + if (parent.getNumRunnableApps() == parent.getMaxParallelApps() - 1) { + highestQueueWithAppsNowRunnable = parent; + } + parent = (ParentQueue) parent.getParent(); + } + + List> appsNowMaybeRunnable = + new ArrayList>(); + + // Compile lists of apps which may now be runnable + // We gather lists instead of building a set of all non-runnable apps so + // that this whole operation can be O(number of queues) instead of + // O(number of apps) + if (highestQueueWithAppsNowRunnable != null) { + gatherPossiblyRunnableAppLists(highestQueueWithAppsNowRunnable, + appsNowMaybeRunnable); + } + String user = app.getUser(); + Integer userNumRunning = usersNumRunnableApps.get(user); + if (userNumRunning == null) { + userNumRunning = 0; + } + if (userNumRunning == getUserMaxParallelApps(user) - 1) { + List userWaitingApps = usersNonRunnableApps.get(user); + if (userWaitingApps != null) { + appsNowMaybeRunnable.add(userWaitingApps); + } + } + + updateAppsRunnability(appsNowMaybeRunnable, + appsNowMaybeRunnable.size()); + } + + /** + * Checks to see whether applications are runnable now by iterating + * through each one of them and check if the queue and user have slack. + * + * if we know how many apps can be runnable, there is no need to iterate + * through all apps, maxRunnableApps is used to break out of the iteration. + */ + private void updateAppsRunnability(List> + appsNowMaybeRunnable, int maxRunnableApps) { + // Scan through and check whether this means that any apps are now runnable + Iterator iter = new MultiListStartTimeIterator( + appsNowMaybeRunnable); + FiCaSchedulerApp prev = null; + List noLongerPendingApps = new ArrayList<>(); + while (iter.hasNext()) { + FiCaSchedulerApp next = iter.next(); + if (next == prev) { + continue; + } + + if (checkRunnabilityWithUpdate(next)) { + LeafQueue nextQueue = next.getCSLeafQueue(); + LOG.info("{} is now runnable in {}", + next.getApplicationAttemptId(), nextQueue); + trackRunnableApp(next); + FiCaSchedulerApp appSched = next; + nextQueue.submitApplicationAttempt(next, next.getUser()); + noLongerPendingApps.add(appSched); + + if (noLongerPendingApps.size() >= maxRunnableApps) { + break; + } + } + + prev = next; + } + + // We remove the apps from their pending lists afterwards so that we don't + // pull them out from under the iterator. If they are not in these lists + // in the first place, there is a bug. + for (FiCaSchedulerApp appSched : noLongerPendingApps) { + if (!(appSched.getCSLeafQueue().removeNonRunnableApp(appSched))) { + LOG.error("Can't make app runnable that does not already exist in queue" + + " as non-runnable: {}. This should never happen.", + appSched.getApplicationAttemptId()); + } + + if (!usersNonRunnableApps.remove(appSched.getUser(), appSched)) { + LOG.error("Waiting app {} expected to be in " + + "usersNonRunnableApps, but was not. This should never happen.", + appSched.getApplicationAttemptId()); + } + } + } + + public void untrackApp(FiCaSchedulerApp app) { + if (app.isRunnable()) { + untrackRunnableApp(app); + } else { + untrackNonRunnableApp(app); + } + } + + /** + * Updates the relevant tracking variables after a runnable app with the given + * queue and user has been removed. 
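The updateRunnabilityOnAppRemoval() logic above only needs to re-examine queues under the highest ancestor that was exactly at its limit before the app left. A compact sketch of that walk, with a simplified QueueNode standing in for the CSQueue hierarchy (names and counters are illustrative only):

    class HighestUnblockedAncestorSketch {
      static final class QueueNode {
        final String path;
        final QueueNode parent;
        int numRunnableApps;
        int maxParallelApps;

        QueueNode(String path, QueueNode parent, int running, int max) {
          this.path = path;
          this.parent = parent;
          this.numRunnableApps = running;
          this.maxParallelApps = max;
        }
      }

      // Counters are read after the removal has been applied, so a queue whose
      // running count equals (limit - 1) was exactly at its limit before.
      // Returns the matching queue closest to the root, or null if none.
      static QueueNode highestNowUnblocked(QueueNode leafAfterRemoval) {
        QueueNode highest = null;
        for (QueueNode q = leafAfterRemoval; q != null; q = q.parent) {
          if (q.numRunnableApps == q.maxParallelApps - 1) {
            highest = q;
          }
        }
        return highest;
      }

      public static void main(String[] args) {
        QueueNode root = new QueueNode("root", null, 9, 20);     // not saturated
        QueueNode a = new QueueNode("root.a", root, 4, 5);       // was at 5/5
        QueueNode leaf = new QueueNode("root.a.a1", a, 1, 100);  // far from limit
        System.out.println(highestNowUnblocked(leaf).path);      // root.a
      }
    }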
+ */ + private void untrackRunnableApp(FiCaSchedulerApp app) { + // Update usersRunnableApps + String user = app.getUser(); + int newUserNumRunning = usersNumRunnableApps.get(user) - 1; + if (newUserNumRunning == 0) { + usersNumRunnableApps.remove(user); + } else { + usersNumRunnableApps.put(user, newUserNumRunning); + } + + // Update runnable app bookkeeping for queues + AbstractCSQueue queue = (AbstractCSQueue) app.getQueue(); + ParentQueue parent = (ParentQueue) queue.getParent(); + while (parent != null) { + parent.decrementRunnableApps(); + parent = (ParentQueue) parent.getParent(); + } + } + + /** + * Stops tracking the given non-runnable app. + */ + private void untrackNonRunnableApp(FiCaSchedulerApp app) { + usersNonRunnableApps.remove(app.getUser(), app); + } + + /** + * Traverses the queue hierarchy under the given queue to gather all lists + * of non-runnable applications. + */ + private void gatherPossiblyRunnableAppLists(AbstractCSQueue queue, + List> appLists) { + if (queue.getNumRunnableApps() < queue.getMaxParallelApps()) { + if (queue instanceof LeafQueue) { + appLists.add( + ((LeafQueue)queue).getCopyOfNonRunnableAppSchedulables()); + } else { + for (CSQueue child : queue.getChildQueues()) { + gatherPossiblyRunnableAppLists((AbstractCSQueue) child, appLists); + } + } + } + } + + private int getUserMaxParallelApps(String user) { + CapacitySchedulerConfiguration conf = scheduler.getConfiguration(); + if (conf == null) { + return Integer.MAX_VALUE; + } + + int userMaxParallelApps = conf.getMaxParallelAppsForUser(user); + + return userMaxParallelApps; + } + + /** + * Takes a list of lists, each of which is ordered by start time, and returns + * their elements in order of start time. + * + * We maintain positions in each of the lists. Each next() call advances + * the position in one of the lists. We maintain a heap that orders lists + * by the start time of the app in the current position in that list. + * This allows us to pick which list to advance in O(log(num lists)) instead + * of O(num lists) time. + */ + static class MultiListStartTimeIterator implements + Iterator { + + private List[] appLists; + private int[] curPositionsInAppLists; + private PriorityQueue appListsByCurStartTime; + + @SuppressWarnings("unchecked") + MultiListStartTimeIterator(List> appListList) { + appLists = appListList.toArray(new List[appListList.size()]); + curPositionsInAppLists = new int[appLists.length]; + appListsByCurStartTime = new PriorityQueue(); + for (int i = 0; i < appLists.length; i++) { + long time = appLists[i].isEmpty() ? 
Long.MAX_VALUE : appLists[i].get(0) + .getStartTime(); + appListsByCurStartTime.add(new IndexAndTime(i, time)); + } + } + + @Override + public boolean hasNext() { + return !appListsByCurStartTime.isEmpty() + && appListsByCurStartTime.peek().time != Long.MAX_VALUE; + } + + @Override + public FiCaSchedulerApp next() { + IndexAndTime indexAndTime = appListsByCurStartTime.remove(); + int nextListIndex = indexAndTime.index; + FiCaSchedulerApp next = appLists[nextListIndex] + .get(curPositionsInAppLists[nextListIndex]); + curPositionsInAppLists[nextListIndex]++; + + if (curPositionsInAppLists[nextListIndex] < + appLists[nextListIndex].size()) { + indexAndTime.time = appLists[nextListIndex] + .get(curPositionsInAppLists[nextListIndex]).getStartTime(); + } else { + indexAndTime.time = Long.MAX_VALUE; + } + appListsByCurStartTime.add(indexAndTime); + + return next; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Remove not supported"); + } + + private static class IndexAndTime implements Comparable { + private int index; + private long time; + + IndexAndTime(int index, long time) { + this.index = index; + this.time = time; + } + + @Override + public int compareTo(IndexAndTime o) { + return time < o.time ? -1 : (time > o.time ? 1 : 0); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof IndexAndTime)) { + return false; + } + IndexAndTime other = (IndexAndTime)o; + return other.time == time; + } + + @Override + public int hashCode() { + return (int)time; + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java index 1937573e4d404..47bdeba45df57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java @@ -51,7 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * CSQueue represents a node in the tree of diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java index e9a0aafe6ee05..586f837f147b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java @@ -78,6 +78,8 @@ public 
class CSQueueMetrics extends QueueMetrics { private static final String MAX_CAPACITY_METRIC_DESC = "MaxCapacity of NAME"; + private CSQueueMetricsForCustomResources csQueueMetricsForCustomResources; + CSQueueMetrics(MetricsSystem ms, String queueName, Queue parent, boolean enableUserMetrics, Configuration conf) { super(ms, queueName, parent, enableUserMetrics, conf); @@ -90,11 +92,14 @@ public class CSQueueMetrics extends QueueMetrics { * mandatory resources metrics */ protected void registerCustomResources() { - Map customResources = initAndGetCustomResources(); - registerCustomResources(customResources, GUARANTEED_CAPACITY_METRIC_PREFIX, - GUARANTEED_CAPACITY_METRIC_DESC); - registerCustomResources(customResources, MAX_CAPACITY_METRIC_PREFIX, - MAX_CAPACITY_METRIC_DESC); + Map customResources = + csQueueMetricsForCustomResources.initAndGetCustomResources(); + csQueueMetricsForCustomResources + .registerCustomResources(customResources, this.registry, + GUARANTEED_CAPACITY_METRIC_PREFIX, GUARANTEED_CAPACITY_METRIC_DESC); + csQueueMetricsForCustomResources + .registerCustomResources(customResources, this.registry, + MAX_CAPACITY_METRIC_PREFIX, MAX_CAPACITY_METRIC_DESC); super.registerCustomResources(); } @@ -184,12 +189,10 @@ public void setGuaranteedResources(String partition, Resource res) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { guaranteedMB.set(res.getMemorySize()); guaranteedVCores.set(res.getVirtualCores()); - if (getQueueMetricsForCustomResources() != null) { - ((CSQueueMetricsForCustomResources) getQueueMetricsForCustomResources()) - .setGuaranteedCapacity(res); - registerCustomResources( - ((CSQueueMetricsForCustomResources) - getQueueMetricsForCustomResources()).getGuaranteedCapacity(), + if (csQueueMetricsForCustomResources != null) { + csQueueMetricsForCustomResources.setGuaranteedCapacity(res); + csQueueMetricsForCustomResources.registerCustomResources( + csQueueMetricsForCustomResources.getGuaranteedCapacity(), registry, GUARANTEED_CAPACITY_METRIC_PREFIX, GUARANTEED_CAPACITY_METRIC_DESC); } } @@ -207,12 +210,10 @@ public void setMaxCapacityResources(String partition, Resource res) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { maxCapacityMB.set(res.getMemorySize()); maxCapacityVCores.set(res.getVirtualCores()); - if (getQueueMetricsForCustomResources() != null) { - ((CSQueueMetricsForCustomResources) getQueueMetricsForCustomResources()) - .setMaxCapacity(res); - registerCustomResources( - ((CSQueueMetricsForCustomResources) - getQueueMetricsForCustomResources()).getMaxCapacity(), + if (csQueueMetricsForCustomResources != null) { + csQueueMetricsForCustomResources.setMaxCapacity(res); + csQueueMetricsForCustomResources.registerCustomResources( + csQueueMetricsForCustomResources.getMaxCapacity(), registry, MAX_CAPACITY_METRIC_PREFIX, MAX_CAPACITY_METRIC_DESC); } } @@ -221,7 +222,9 @@ public void setMaxCapacityResources(String partition, Resource res) { @Override protected void createQueueMetricsForCustomResources() { if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { - setQueueMetricsForCustomResources(new CSQueueMetricsForCustomResources()); + this.csQueueMetricsForCustomResources = + new CSQueueMetricsForCustomResources(); + setQueueMetricsForCustomResources(csQueueMetricsForCustomResources); registerCustomResources(); } } @@ -229,7 +232,7 @@ protected void createQueueMetricsForCustomResources() { public synchronized static CSQueueMetrics forQueue(String queueName, Queue parent, boolean enableUserMetrics, 
Configuration conf) { MetricsSystem ms = DefaultMetricsSystem.instance(); - QueueMetrics metrics = QueueMetrics.getQueueMetrics().get(queueName); + QueueMetrics metrics = getQueueMetrics().get(queueName); if (metrics == null) { metrics = new CSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf) @@ -241,7 +244,7 @@ public synchronized static CSQueueMetrics forQueue(String queueName, ms.register(sourceName(queueName).toString(), "Metrics for queue: " + queueName, metrics); } - QueueMetrics.getQueueMetrics().put(queueName, metrics); + getQueueMetrics().put(queueName, metrics); } return (CSQueueMetrics) metrics; @@ -254,7 +257,8 @@ public synchronized QueueMetrics getUserMetrics(String userName) { } CSQueueMetrics metrics = (CSQueueMetrics) users.get(userName); if (metrics == null) { - metrics = new CSQueueMetrics(metricsSystem, queueName, null, false, conf); + metrics = + new CSQueueMetrics(metricsSystem, queueName, null, false, conf); users.put(userName, metrics); metricsSystem.register( sourceName(queueName).append(",user=").append(userName).toString(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueStore.java index 04758235cac94..a551de15d8e86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueStore.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import java.util.Collection; import java.util.HashMap; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index 79fe7bf6c4f0f..6deb7da582bd7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class CSQueueUtils { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 861dc432a870a..890334f17bb95 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -162,9 +162,9 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QUEUE_MAPPING; @@ -224,7 +224,8 @@ public Configuration getConf() { private boolean usePortForNodeName; private boolean scheduleAsynchronously; - private List asyncSchedulerThreads; + @VisibleForTesting + protected List asyncSchedulerThreads; private ResourceCommitterService resourceCommitterService; private RMNodeLabelsManager labelManager; private AppPriorityACLsManager appPriorityACLManager; @@ -242,8 +243,11 @@ public Configuration getConf() { private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5; private long asyncMaxPendingBacklogs; + private CSMaxRunningAppsEnforcer maxRunningEnforcer; + public CapacityScheduler() { super(CapacityScheduler.class.getName()); + this.maxRunningEnforcer = new CSMaxRunningAppsEnforcer(this); } @Override @@ -483,6 +487,7 @@ public void reinitialize(Configuration newConf, RMContext rmContext, super.reinitialize(newConf, rmContext); } + maxRunningEnforcer.updateRunnabilityOnReload(); } finally { writeLock.unlock(); } @@ -698,8 +703,11 @@ public void updatePlacementRules() throws IOException { Set distinguishRuleSet = CapacitySchedulerConfigValidator .validatePlacementRules(placementRuleStrs); - // add UserGroupMappingPlacementRule if absent - distinguishRuleSet.add(YarnConfiguration.USER_GROUP_PLACEMENT_RULE); + // add UserGroupMappingPlacementRule if empty,default value of + // yarn.scheduler.queue-placement-rules is user-group + if (distinguishRuleSet.isEmpty()) { + distinguishRuleSet.add(YarnConfiguration.USER_GROUP_PLACEMENT_RULE); + } placementRuleStrs = new ArrayList<>(distinguishRuleSet); @@ -989,12 +997,14 @@ private void addApplication(ApplicationId applicationId, String queueName, // not auto-created above, then its parent queue should match // the parent queue specified in queue mapping } else if (!queue.getParent().getQueueShortName().equals( - placementContext.getParentQueue())) { + placementContext.getParentQueue()) + && !queue.getParent().getQueuePath().equals( + placementContext.getParentQueue())) { String message = "Auto created Leaf queue " + placementContext.getQueue() + " " + "already exists 
under queue : " + queue .getParent().getQueueShortName() - + ".But Queue mapping configuration " + + + ". But Queue mapping configuration " + CapacitySchedulerConfiguration.QUEUE_MAPPING + " has been " + "updated to a different parent queue : " + placementContext.getParentQueue() @@ -1078,6 +1088,9 @@ private void addApplicationAttempt( // SchedulerApplication#setCurrentAppAttempt. attempt.setPriority(application.getPriority()); + maxRunningEnforcer.checkRunnabilityWithUpdate(attempt); + maxRunningEnforcer.trackApp(attempt); + queue.submitApplicationAttempt(attempt, application.getUser()); LOG.info("Added Application Attempt " + applicationAttemptId + " to scheduler from user " + application.getUser() + " in queue " @@ -1171,8 +1184,13 @@ private void doneApplicationAttempt( LOG.error( "Cannot finish application " + "from non-leaf queue: " + csQueue.getQueuePath()); - } else{ + } else { csQueue.finishApplicationAttempt(attempt, csQueue.getQueuePath()); + + maxRunningEnforcer.untrackApp(attempt); + if (attempt.isRunnable()) { + maxRunningEnforcer.updateRunnabilityOnAppRemoval(attempt); + } } } finally { writeLock.unlock(); @@ -2268,6 +2286,21 @@ private void markContainerForNonKillable( public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl, String queueName) { CSQueue queue = getQueue(queueName); + + if (queueName.startsWith("root.")) { + // can only check proper ACLs if the path is fully qualified + while (queue == null) { + int sepIndex = queueName.lastIndexOf("."); + String parentName = queueName.substring(0, sepIndex); + if (LOG.isDebugEnabled()) { + LOG.debug("Queue {} does not exist, checking parent {}", + queueName, parentName); + } + queueName = parentName; + queue = queueManager.getQueue(queueName); + } + } + if (queue == null) { LOG.debug("ACL not found for queue access-type {} for queue {}", acl, queueName); @@ -2686,10 +2719,10 @@ public Priority checkAndGetApplicationPriority( } // Lets check for ACLs here. - if (!appPriorityACLManager.checkAccess(user, queuePath, appPriority)) { + if (!appPriorityACLManager.checkAccess(user, normalizeQueueName(queuePath), appPriority)) { throw new YarnException(new AccessControlException( - "User " + user + " does not have permission to submit/update " - + applicationId + " for " + appPriority)); + "User " + user + " does not have permission to submit/update " + + applicationId + " for " + appPriority)); } LOG.info("Priority '" + appPriority.getPriority() @@ -3266,4 +3299,22 @@ public boolean isMultiNodePlacementEnabled() { public int getNumAsyncSchedulerThreads() { return asyncSchedulerThreads == null ? 0 : asyncSchedulerThreads.size(); } + + @VisibleForTesting + public void setMaxRunningAppsEnforcer(CSMaxRunningAppsEnforcer enforcer) { + this.maxRunningEnforcer = enforcer; + } + + /** + * Returning true as capacity scheduler supports placement constraints. 
+ */ + @Override + public boolean placementConstraintEnabled() { + return true; + } + + @VisibleForTesting + public void setQueueManager(CapacitySchedulerQueueManager qm) { + this.queueManager = qm; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java index 676c6d177b390..c3b4df4efdf46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java @@ -28,7 +28,7 @@ import java.io.IOException; import java.util.Collection; -import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.Set; public final class CapacitySchedulerConfigValidator { @@ -42,19 +42,23 @@ private CapacitySchedulerConfigValidator() { public static boolean validateCSConfiguration( final Configuration oldConf, final Configuration newConf, final RMContext rmContext) throws IOException { - //TODO: extract all the validation steps and replace reinitialize with - //the specific validation steps CapacityScheduler newCs = new CapacityScheduler(); - newCs.setConf(oldConf); - newCs.setRMContext(rmContext); - newCs.init(oldConf); - newCs.reinitialize(newConf, rmContext, true); - return true; + try { + //TODO: extract all the validation steps and replace reinitialize with + //the specific validation steps + newCs.setConf(oldConf); + newCs.setRMContext(rmContext); + newCs.init(oldConf); + newCs.reinitialize(newConf, rmContext, true); + return true; + } finally { + newCs.stop(); + } } public static Set validatePlacementRules( Collection placementRuleStrs) throws IOException { - Set distinguishRuleSet = new HashSet<>(); + Set distinguishRuleSet = new LinkedHashSet<>(); // fail the case if we get duplicate placementRule add in for (String pls : placementRuleStrs) { if (!distinguishRuleSet.add(pls)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 3ae66cab7d952..36d2e6c3716b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableSet; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueuePlacementRuleUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +42,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; -import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMappingEntity; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AppPriorityACLConfigurationParser.AppPriorityACLKeyType; @@ -377,7 +376,12 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur public static final String PATTERN_FOR_ABSOLUTE_RESOURCE = "^\\[[\\w\\.,\\-_=\\ /]+\\]$"; - private static final Pattern RESOURCE_PATTERN = Pattern.compile(PATTERN_FOR_ABSOLUTE_RESOURCE); + public static final Pattern RESOURCE_PATTERN = + Pattern.compile(PATTERN_FOR_ABSOLUTE_RESOURCE); + + public static final String MAX_PARALLEL_APPLICATIONS = "max-parallel-apps"; + + public static final int DEFAULT_MAX_PARALLEL_APPLICATIONS = Integer.MAX_VALUE; /** * Different resource types supported. @@ -413,7 +417,11 @@ static String getQueueOrderingPolicyPrefix(String queue) { String queueName = PREFIX + queue + DOT + ORDERING_POLICY + DOT; return queueName; } - + + static String getUserPrefix(String user) { + return PREFIX + "user." 
+ user + DOT; + } + private String getNodeLabelPrefix(String queue, String label) { if (label.equals(CommonNodeLabelsManager.NO_LABEL)) { return getQueuePrefix(queue); @@ -1033,12 +1041,12 @@ public void setOverrideWithQueueMappings(boolean overrideWithQueueMappings) { setBoolean(ENABLE_QUEUE_MAPPING_OVERRIDE, overrideWithQueueMappings); } - public List getQueueMappingEntity( + public List getQueueMappingEntity( String queueMappingSuffix) { String queueMappingName = buildQueueMappingRuleProperty(queueMappingSuffix); - List mappings = - new ArrayList(); + List mappings = + new ArrayList(); Collection mappingsString = getTrimmedStringCollection(queueMappingName); for (String mappingValue : mappingsString) { @@ -1052,10 +1060,11 @@ public List getQueueMappingEntity( //Mappings should be consistent, and have the parent path parsed // from the beginning - QueueMappingEntity m = new QueueMappingEntity( - mapping[0], - QueuePlacementRuleUtils.extractQueuePath(mapping[1])); - + QueueMapping m = QueueMapping.QueueMappingBuilder.create() + .type(QueueMapping.MappingType.APPLICATION) + .source(mapping[0]) + .parsePathString(mapping[1]) + .build(); mappings.add(m); } @@ -1070,15 +1079,15 @@ private String buildQueueMappingRuleProperty (String queueMappingSuffix) { } @VisibleForTesting - public void setQueueMappingEntities(List queueMappings, + public void setQueueMappingEntities(List queueMappings, String queueMappingSuffix) { if (queueMappings == null) { return; } List queueMappingStrs = new ArrayList<>(); - for (QueueMappingEntity mapping : queueMappings) { - queueMappingStrs.add(mapping.toString()); + for (QueueMapping mapping : queueMappings) { + queueMappingStrs.add(mapping.toTypelessString()); } String mappingRuleProp = buildQueueMappingRuleProperty(queueMappingSuffix); @@ -1130,7 +1139,7 @@ public List getQueueMappings() { m = QueueMappingBuilder.create() .type(mappingType) .source(mapping[1]) - .queuePath(QueuePlacementRuleUtils.extractQueuePath(mapping[2])) + .parsePathString(mapping[2]) .build(); } catch (Throwable t) { throw new IllegalArgumentException( @@ -1386,6 +1395,31 @@ public boolean shouldAppFailFast(Configuration conf) { return conf.getBoolean(APP_FAIL_FAST, DEFAULT_APP_FAIL_FAST); } + public Integer getMaxParallelAppsForQueue(String queue) { + int defaultMaxParallelAppsForQueue = + getInt(PREFIX + MAX_PARALLEL_APPLICATIONS, + DEFAULT_MAX_PARALLEL_APPLICATIONS); + + String maxParallelAppsForQueue = get(getQueuePrefix(queue) + + MAX_PARALLEL_APPLICATIONS); + + return (maxParallelAppsForQueue != null) ? + Integer.parseInt(maxParallelAppsForQueue) + : defaultMaxParallelAppsForQueue; + } + + public Integer getMaxParallelAppsForUser(String user) { + int defaultMaxParallelAppsForUser = + getInt(PREFIX + "user." + MAX_PARALLEL_APPLICATIONS, + DEFAULT_MAX_PARALLEL_APPLICATIONS); + String maxParallelAppsForUser = get(getUserPrefix(user) + + MAX_PARALLEL_APPLICATIONS); + + return (maxParallelAppsForUser != null) ? + Integer.parseInt(maxParallelAppsForUser) + : defaultMaxParallelAppsForUser; + } + private static final String PREEMPTION_CONFIG_PREFIX = "yarn.resourcemanager.monitor.capacity.preemption."; @@ -1509,6 +1543,21 @@ public boolean shouldAppFailFast(Configuration conf) { + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX + "preemption-order-policy"; public static final String DEFAULT_INTRAQUEUE_PREEMPTION_ORDER_POLICY = "userlimit_first"; + /** + * Flag to determine whether or not to preempt containers from apps where some + * used resources are less than the user's user limit. 
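The MAX_PARALLEL_APPLICATIONS constant and the getMaxParallelAppsForQueue()/getMaxParallelAppsForUser() getters above resolve the limit from three places: a per-queue key, a per-user key, and a cluster-wide default. A hedged sketch of the fully expanded property names these getters read, assuming the usual "yarn.scheduler.capacity." prefix; the queue path "root.a" and the user "alice" are made-up examples:

    import org.apache.hadoop.conf.Configuration;

    public final class MaxParallelAppsConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Cluster-wide default, used when no queue- or user-specific value is set.
        conf.setInt("yarn.scheduler.capacity.max-parallel-apps", 128);

        // Per-queue override: getQueuePrefix("root.a") + "max-parallel-apps".
        conf.setInt("yarn.scheduler.capacity.root.a.max-parallel-apps", 10);

        // Per-user override: getUserPrefix("alice") + "max-parallel-apps".
        conf.setInt("yarn.scheduler.capacity.user.alice.max-parallel-apps", 5);

        // Cross-queue conservative DRF preemption flag introduced above,
        // PREEMPTION_CONFIG_PREFIX + "conservative-drf".
        conf.setBoolean(
            "yarn.resourcemanager.monitor.capacity.preemption.conservative-drf",
            false);
      }
    }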
+ */ + public static final String CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + PREEMPTION_CONFIG_PREFIX + "conservative-drf"; + public static final Boolean DEFAULT_CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + false; + + public static final String IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + PREEMPTION_CONFIG_PREFIX + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX + + "conservative-drf"; + public static final Boolean DEFAULT_IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF = + true; + /** * Should we allow queues continue grow after all queue reaches their * guaranteed capacity. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java index 5f8988b077811..315c9f6c930f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index 7bac12cf6c433..a44929beed669 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -47,7 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement; import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 64b153637929e..96d309c547ed5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -22,6 +22,7 @@ import java.util.*; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.time.DateUtils; @@ -75,8 +76,8 @@ import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; @Private @Unstable @@ -121,6 +122,16 @@ public class LeafQueue extends AbstractCSQueue { private volatile OrderingPolicy orderingPolicy = null; + // Map>> + // Not thread safe: only the last level is a ConcurrentMap + @VisibleForTesting + Map>> + userLimitsCache = new HashMap<>(); + + // Not thread safe + @VisibleForTesting + long currentUserLimitCacheVersion = 0; + // record all ignore partition exclusivityRMContainer, this will be used to do // preemption, key is the partition of the RMContainer allocated on private Map> ignorePartitionExclusivityRMContainers = @@ -129,6 +140,9 @@ public class LeafQueue extends AbstractCSQueue { List priorityAcls = new ArrayList(); + private final List runnableApps = new ArrayList<>(); + private final List nonRunnableApps = new ArrayList<>(); + @SuppressWarnings({ "unchecked", "rawtypes" }) public LeafQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { @@ -159,6 +173,7 @@ protected void setupQueueConfigs(Resource clusterResource) setupQueueConfigs(clusterResource, csContext.getConfiguration()); } + @SuppressWarnings("checkstyle:nowhitespaceafter") protected void setupQueueConfigs(Resource clusterResource, CapacitySchedulerConfiguration conf) throws IOException { @@ -289,7 +304,9 @@ protected void setupQueueConfigs(Resource clusterResource, + " (int)(configuredMaximumSystemApplications * absoluteCapacity)]" + "\n" + "maxApplicationsPerUser = " + maxApplicationsPerUser + " [= (int)(maxApplications * (userLimit / 100.0f) * " - + "userLimitFactor) ]" + "\n" + "usedCapacity = " + + "userLimitFactor) ]" + "\n" + + "maxParallelApps = " + getMaxParallelApps() + "\n" + + "usedCapacity = " + + queueCapacities.getUsedCapacity() + " [= usedResourcesMemory / " + "(clusterResourceMemory * absoluteCapacity)]" + "\n" + "absoluteUsedCapacity = " + absoluteUsedCapacity @@ -386,7 +403,8 @@ void setUserLimitFactor(float userLimitFactor) { public int getNumApplications() { readLock.lock(); try { - return getNumPendingApplications() + getNumActiveApplications(); + return getNumPendingApplications() + getNumActiveApplications() + + getNumNonRunnableApps(); } finally { readLock.unlock(); } @@ -887,16 +905,28 @@ protected void activateApplications() { writeLock.unlock(); } } - + private void addApplicationAttempt(FiCaSchedulerApp application, User user) { writeLock.lock(); try { + applicationAttemptMap.put(application.getApplicationAttemptId(), + application); + + if (application.isRunnable()) { + runnableApps.add(application); + LOG.debug("Adding runnable 
application: {}", + application.getApplicationAttemptId()); + } else { + nonRunnableApps.add(application); + LOG.info("Application attempt {} is not runnable," + + " parallel limit reached", application.getApplicationAttemptId()); + return; + } + // Accept user.submitApplication(); getPendingAppsOrderingPolicy().addSchedulableEntity(application); - applicationAttemptMap.put(application.getApplicationAttemptId(), - application); // Activate applications if (Resources.greaterThan(resourceCalculator, lastClusterResource, @@ -917,7 +947,9 @@ private void addApplicationAttempt(FiCaSchedulerApp application, .getPendingApplications() + " #user-active-applications: " + user .getActiveApplications() + " #queue-pending-applications: " + getNumPendingApplications() + " #queue-active-applications: " - + getNumActiveApplications()); + + getNumActiveApplications() + + " #queue-nonrunnable-applications: " + + getNumNonRunnableApps()); } finally { writeLock.unlock(); } @@ -950,6 +982,15 @@ private void removeApplicationAttempt( // which is caused by wrong invoking order, will fix UT separately User user = usersManager.getUserAndAddIfAbsent(userName); + boolean runnable = runnableApps.remove(application); + if (!runnable) { + // removeNonRunnableApp acquires the write lock again, which is fine + if (!removeNonRunnableApp(application)) { + LOG.error("Given app to remove " + application + + " does not exist in queue " + getQueuePath()); + } + } + String partitionName = application.getAppAMNodePartitionName(); boolean wasActive = orderingPolicy.removeSchedulableEntity(application); if (!wasActive) { @@ -1008,11 +1049,15 @@ private void setPreemptionAllowed(ResourceLimits limits, String nodePartition) { private CSAssignment allocateFromReservedContainer(Resource clusterResource, CandidateNodeSet candidates, ResourceLimits currentResourceLimits, SchedulingMode schedulingMode) { - // Considering multi-node scheduling, its better to iterate through - // all candidates and stop once we get atleast one good node to allocate - // where reservation was made earlier. In normal case, there is only one - // node and hence there wont be any impact after this change. - for (FiCaSchedulerNode node : candidates.getAllNodes().values()) { + + // Irrespective of Single / Multi Node Placement, the allocate from + // Reserved Container has to happen only for the single node which + // CapacityScheduler#allocateFromReservedContainer invokes with. + // Else In Multi Node Placement, there won't be any Allocation or + // Reserve of new containers when there is a RESERVED container on + // a node which is full. 
+ FiCaSchedulerNode node = CandidateNodeSetUtils.getSingleNode(candidates); + if (node != null) { RMContainer reservedContainer = node.getReservedContainer(); if (reservedContainer != null) { FiCaSchedulerApp application = getApplication( @@ -1032,6 +1077,47 @@ private CSAssignment allocateFromReservedContainer(Resource clusterResource, return null; } + private ConcurrentMap getUserLimitCache( + String partition, + SchedulingMode schedulingMode) { + synchronized (userLimitsCache) { + long latestVersion = usersManager.getLatestVersionOfUsersState(); + + if (latestVersion != this.currentUserLimitCacheVersion) { + // User limits cache needs invalidating + this.currentUserLimitCacheVersion = latestVersion; + userLimitsCache.clear(); + + Map> + uLCByPartition = new HashMap<>(); + userLimitsCache.put(partition, uLCByPartition); + + ConcurrentMap uLCBySchedulingMode = + new ConcurrentHashMap<>(); + uLCByPartition.put(schedulingMode, uLCBySchedulingMode); + + return uLCBySchedulingMode; + } + + // User limits cache does not need invalidating + Map> + uLCByPartition = userLimitsCache.get(partition); + if (uLCByPartition == null) { + uLCByPartition = new HashMap<>(); + userLimitsCache.put(partition, uLCByPartition); + } + + ConcurrentMap uLCBySchedulingMode = + uLCByPartition.get(schedulingMode); + if (uLCBySchedulingMode == null) { + uLCBySchedulingMode = new ConcurrentHashMap<>(); + uLCByPartition.put(schedulingMode, uLCBySchedulingMode); + } + + return uLCBySchedulingMode; + } + } + @Override public CSAssignment assignContainers(Resource clusterResource, CandidateNodeSet candidates, @@ -1078,7 +1164,8 @@ public CSAssignment assignContainers(Resource clusterResource, return CSAssignment.NULL_ASSIGNMENT; } - Map userLimits = new HashMap<>(); + ConcurrentMap userLimits = + this.getUserLimitCache(candidates.getPartition(), schedulingMode); boolean needAssignToQueueCheck = true; IteratorSelector sel = new IteratorSelector(); sel.setPartition(candidates.getPartition()); @@ -1123,7 +1210,13 @@ public CSAssignment assignContainers(Resource clusterResource, cachedUserLimit); if (cul == null) { cul = new CachedUserLimit(userLimit); - userLimits.put(application.getUser(), cul); + CachedUserLimit retVal = + userLimits.putIfAbsent(application.getUser(), cul); + if (retVal != null) { + // another thread updated the user limit cache before us + cul = retVal; + userLimit = cul.userLimit; + } } // Check user limit boolean userAssignable = true; @@ -1393,8 +1486,9 @@ private Resource getHeadroom(User user, : getQueueMaxResource(partition); Resource headroom = Resources.componentwiseMin( - Resources.subtract(userLimitResource, user.getUsed(partition)), - Resources.subtract(currentPartitionResourceLimit, + Resources.subtractNonNegative(userLimitResource, + user.getUsed(partition)), + Resources.subtractNonNegative(currentPartitionResourceLimit, queueUsage.getUsed(partition))); // Normalize it before return headroom = @@ -1539,8 +1633,7 @@ protected boolean canAssignToUser(Resource clusterResource, user.getUsed(nodePartition), limit)) { // if enabled, check to see if could we potentially use this node instead // of a reserved node if the application has reserved containers - if (this.reservationsContinueLooking && nodePartition.equals( - CommonNodeLabelsManager.NO_LABEL)) { + if (this.reservationsContinueLooking) { if (Resources.lessThanOrEqual(resourceCalculator, clusterResource, Resources.subtract(user.getUsed(), application.getCurrentReservation()), limit)) { @@ -1714,11 +1807,16 @@ void allocateResource(Resource 
clusterResource, User user = usersManager.updateUserResourceUsage(userName, resource, nodePartition, true); - // Note this is a bit unconventional since it gets the object and modifies - // it here, rather then using set routine - Resources.subtractFrom(application.getHeadroom(), resource); // headroom - metrics.setAvailableResourcesToUser(nodePartition, - userName, application.getHeadroom()); + Resource partitionHeadroom = Resources.createResource(0, 0); + if (metrics.getUserMetrics(userName) != null) { + partitionHeadroom = getHeadroom(user, + cachedResourceLimitsForHeadroom.getLimit(), clusterResource, + getResourceLimitForActiveUsers(userName, clusterResource, + nodePartition, SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), + nodePartition); + } + metrics.setAvailableResourcesToUser(nodePartition, userName, + partitionHeadroom); if (LOG.isDebugEnabled()) { LOG.debug(getQueuePath() + " user=" + userName + " used=" @@ -1757,8 +1855,16 @@ void releaseResource(Resource clusterResource, User user = usersManager.updateUserResourceUsage(userName, resource, nodePartition, false); - metrics.setAvailableResourcesToUser(nodePartition, - userName, application.getHeadroom()); + Resource partitionHeadroom = Resources.createResource(0, 0); + if (metrics.getUserMetrics(userName) != null) { + partitionHeadroom = getHeadroom(user, + cachedResourceLimitsForHeadroom.getLimit(), clusterResource, + getResourceLimitForActiveUsers(userName, clusterResource, + nodePartition, SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), + nodePartition); + } + metrics.setAvailableResourcesToUser(nodePartition, userName, + partitionHeadroom); if (LOG.isDebugEnabled()) { LOG.debug( @@ -2187,8 +2293,8 @@ public Set getAllUsers() { static class CachedUserLimit { final Resource userLimit; - boolean canAssign = true; - Resource reservation = Resources.none(); + volatile boolean canAssign = true; + volatile Resource reservation = Resources.none(); CachedUserLimit(Resource userLimit) { this.userLimit = userLimit; @@ -2211,4 +2317,43 @@ private void updateQueuePreemptionMetrics(RMContainer rmc) { usedSeconds); metrics.updatePreemptedForCustomResources(containerResource); } + + @Override + int getNumRunnableApps() { + readLock.lock(); + try { + return runnableApps.size(); + } finally { + readLock.unlock(); + } + } + + int getNumNonRunnableApps() { + readLock.lock(); + try { + return nonRunnableApps.size(); + } finally { + readLock.unlock(); + } + } + + boolean removeNonRunnableApp(FiCaSchedulerApp app) { + writeLock.lock(); + try { + return nonRunnableApps.remove(app); + } finally { + writeLock.unlock(); + } + } + + List getCopyOfNonRunnableAppSchedulables() { + List appsToReturn = new ArrayList<>(); + readLock.lock(); + try { + appsToReturn.addAll(nonRunnableApps); + } finally { + readLock.unlock(); + } + return appsToReturn; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index d814a6054bccc..1f31d8b0a0da0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -93,6 +93,8 @@ public class ParentQueue extends AbstractCSQueue { private long lastSkipQueueDebugLoggingTimestamp = -1; + private int runnableApps; + public ParentQueue(CapacitySchedulerContext cs, String queueName, CSQueue parent, CSQueue old) throws IOException { super(cs, queueName, parent, old); @@ -1122,8 +1124,18 @@ private void deriveCapacityFromAbsoluteConfigurations(String label, if (childQueue instanceof LeafQueue) { LeafQueue leafQueue = (LeafQueue) childQueue; CapacitySchedulerConfiguration conf = csContext.getConfiguration(); - int maxApplications = (int) (conf.getMaximumSystemApplications() - * childQueue.getQueueCapacities().getAbsoluteCapacity(label)); + int maxApplications = + conf.getMaximumApplicationsPerQueue(childQueue.getQueuePath()); + if (maxApplications < 0) { + int maxGlobalPerQueueApps = conf.getGlobalMaximumApplicationsPerQueue(); + if (maxGlobalPerQueueApps > 0) { + maxApplications = (int) (maxGlobalPerQueueApps * + childQueue.getQueueCapacities().getAbsoluteCapacity(label)); + } else { + maxApplications = (int) (conf.getMaximumSystemApplications() + * childQueue.getQueueCapacities().getAbsoluteCapacity(label)); + } + } leafQueue.setMaxApplications(maxApplications); int maxApplicationsPerUser = Math.min(maxApplications, @@ -1382,4 +1394,32 @@ public void stopQueue() { public QueueOrderingPolicy getQueueOrderingPolicy() { return queueOrderingPolicy; } + + @Override + int getNumRunnableApps() { + readLock.lock(); + try { + return runnableApps; + } finally { + readLock.unlock(); + } + } + + void incrementRunnableApps() { + writeLock.lock(); + try { + runnableApps++; + } finally { + writeLock.unlock(); + } + } + + void decrementRunnableApps() { + writeLock.lock(); + try { + runnableApps--; + } finally { + writeLock.unlock(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java index e6527ae4aa532..1b175d1ff5c84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java index 
00b3a77d6fc00..14766e9953d46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java @@ -24,7 +24,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; @@ -41,7 +40,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * {@link UsersManager} tracks users in the system and its respective data @@ -72,7 +71,7 @@ public class UsersManager implements AbstractUsersManager { // To detect whether there is a change in user count for every user-limit // calculation. - private AtomicLong latestVersionOfUsersState = new AtomicLong(0); + private long latestVersionOfUsersState = 0; private Map> localVersionOfActiveUsersState = new HashMap>(); private Map> localVersionOfAllUsersState = @@ -91,8 +90,12 @@ public class UsersManager implements AbstractUsersManager { new HashMap>(); // Pre-computed list of user-limits. - Map> preComputedActiveUserLimit = new ConcurrentHashMap<>(); - Map> preComputedAllUserLimit = new ConcurrentHashMap<>(); + @VisibleForTesting + Map> preComputedActiveUserLimit = + new HashMap<>(); + @VisibleForTesting + Map> preComputedAllUserLimit = + new HashMap<>(); private float activeUsersTimesWeights = 0.0f; private float allUsersTimesWeights = 0.0f; @@ -361,9 +364,9 @@ public void userLimitNeedsRecompute() { writeLock.lock(); try { - long value = latestVersionOfUsersState.incrementAndGet(); + long value = ++latestVersionOfUsersState; if (value < 0) { - latestVersionOfUsersState.set(0); + latestVersionOfUsersState = 0; } } finally { writeLock.unlock(); @@ -581,6 +584,15 @@ public Resource getComputedResourceLimitForAllUsers(String userName, return userSpecificUserLimit; } + protected long getLatestVersionOfUsersState() { + readLock.lock(); + try { + return latestVersionOfUsersState; + } finally { + readLock.unlock(); + } + } + /* * Recompute user-limit under following conditions: 1. cached user-limit does * not exist in local map. 2. 
Total User count doesn't match with local cached @@ -588,8 +600,13 @@ public Resource getComputedResourceLimitForAllUsers(String userName, */ private boolean isRecomputeNeeded(SchedulingMode schedulingMode, String nodePartition, boolean isActive) { - return (getLocalVersionOfUsersState(nodePartition, schedulingMode, - isActive) != latestVersionOfUsersState.get()); + readLock.lock(); + try { + return (getLocalVersionOfUsersState(nodePartition, schedulingMode, + isActive) != latestVersionOfUsersState); + } finally { + readLock.unlock(); + } } /* @@ -610,7 +627,7 @@ private void setLocalVersionOfUsersState(String nodePartition, localVersionOfUsersState.put(nodePartition, localVersion); } - localVersion.put(schedulingMode, latestVersionOfUsersState.get()); + localVersion.put(schedulingMode, latestVersionOfUsersState); } finally { writeLock.unlock(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/WorkflowPriorityMappingsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/WorkflowPriorityMappingsManager.java index 70387477532dc..fa6977aadffce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/WorkflowPriorityMappingsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/WorkflowPriorityMappingsManager.java @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Private @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index 1dacc962424a9..cced238b60164 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -79,12 +79,11 @@ private boolean checkHeadroom(Resource clusterResource, String nodePartition) { // If headroom + currentReservation < required, we cannot allocate this // require - Resource resourceCouldBeUnReserved = application.getCurrentReservation(); - if (!application.getCSLeafQueue().getReservationContinueLooking() - || !nodePartition.equals(RMNodeLabelsManager.NO_LABEL)) { - // If we don't allow reservation continuous looking, OR we're looking at - // non-default node partition, we won't allow to unreserve before - // allocation. 
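Both UsersManager and the new LeafQueue#userLimitsCache rely on the same invalidation idea: latestVersionOfUsersState is bumped whenever user state changes, and cached user limits are reused only while the locally recorded version still matches it. A hypothetical, minimal sketch of that version-stamped cache (the class name and shape are illustrative only):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class VersionedUserLimitCache<V> {
      private final ConcurrentMap<String, V> limitsByUser =
          new ConcurrentHashMap<>();
      private long cachedVersion = -1;

      // Returns the per-user map, clearing it first when the users' state
      // version has moved on (compare LeafQueue#getUserLimitCache and
      // UsersManager#isRecomputeNeeded).
      synchronized ConcurrentMap<String, V> get(long latestVersion) {
        if (latestVersion != cachedVersion) {
          cachedVersion = latestVersion;
          limitsByUser.clear();
        }
        return limitsByUser;
      }
    }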
+ Resource resourceCouldBeUnReserved = + application.getAppAttemptResourceUsage().getReserved(nodePartition); + if (!application.getCSLeafQueue().getReservationContinueLooking()) { + // If we don't allow reservation continuous looking, + // we won't allow to unreserve before allocation. resourceCouldBeUnReserved = Resources.none(); } return Resources.greaterThanOrEqual(rc, clusterResource, Resources.add( @@ -583,13 +582,10 @@ private ContainerAllocation assignContainer(Resource clusterResource, // Allocate... // We will only do continuous reservation when this is not allocated from // reserved container - if (rmContainer == null && reservationsContinueLooking - && node.getLabels().isEmpty()) { + if (rmContainer == null && reservationsContinueLooking) { // when reservationsContinueLooking is set, we may need to unreserve // some containers to meet this queue, its parents', or the users' // resource limits. - // TODO, need change here when we want to support continuous reservation - // looking for labeled partitions. if (!shouldAllocOrReserveNewContainer || needToUnreserve) { if (!needToUnreserve) { // If we shouldn't allocate/reserve new container then we should @@ -837,6 +833,7 @@ private ContainerAllocation allocate(Resource clusterResource, // Do checks before determining which node to allocate // Directly return if this check fails. ContainerAllocation result; + ContainerAllocation lastReservation = null; AppPlacementAllocator schedulingPS = application.getAppSchedulingInfo().getAppPlacementAllocator( @@ -878,11 +875,24 @@ private ContainerAllocation allocate(Resource clusterResource, result = tryAllocateOnNode(clusterResource, node, schedulingMode, resourceLimits, schedulerKey, reservedContainer); - if (AllocationState.ALLOCATED == result.getAllocationState() - || AllocationState.RESERVED == result.getAllocationState()) { + if (AllocationState.ALLOCATED == result.getAllocationState()) { result = doAllocation(result, node, schedulerKey, reservedContainer); break; } + + // In MultiNodePlacement, Try Allocate on other Available nodes + // from Iterator as well before Reserving. Else there won't be any + // Allocate of new containers when the first node in the + // iterator could not fit and returns RESERVED allocation. 
+ if (AllocationState.RESERVED == result.getAllocationState()) { + lastReservation = result; + if (iter.hasNext()) { + continue; + } else { + result = doAllocation(lastReservation, node, schedulerKey, + reservedContainer); + } + } } return result; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java index 97d9933109a05..263dc1221469c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java @@ -25,7 +25,7 @@ import java.util.List; import java.util.Map; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.gson.GsonBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -358,6 +358,7 @@ protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } + @Override public void close() throws IOException { if (fileSystem != null) { fileSystem.close(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java index dc2c724aac7f9..0fc0ecc41fa6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java @@ -22,6 +22,7 @@ import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import java.io.IOException; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -81,37 +82,78 @@ public long getConfigVersion() { return configVersion; } + /** + * Configuration mutations not logged (i.e. not persisted) but directly + * confirmed. As such, a list of persisted configuration mutations does not + * exist. + * @return null Configuration mutation list not applicable for this store. + */ @Override public List getConfirmedConfHistory(long fromId) { // Unimplemented. return null; } + /** + * Configuration mutations not logged (i.e. not persisted) but directly + * confirmed. As such, a list of persisted configuration mutations does not + * exist. + * @return null Configuration mutation list not applicable for this store. + */ @Override protected LinkedList getLogs() { // Unimplemented. return null; } + /** + * Configuration mutations applied directly in-memory. 
As such, there is no + * persistent configuration store. + * As there is no configuration store for versioning purposes, + * a conf store version is not applicable. + * @return null Conf store version not applicable for this store. + * @throws Exception + */ @Override public Version getConfStoreVersion() throws Exception { // Does nothing. return null; } + /** + * Configuration mutations not logged (i.e. not persisted). As such, they are + * not persisted and not versioned. Hence, no version information to store. + * @throws Exception + */ @Override public void storeVersion() throws Exception { // Does nothing. } + /** + * Configuration mutations not logged (i.e. not persisted). As such, they are + * not persisted and not versioned. Hence, a current version is not + * applicable. + * @return null A current version not applicable for this store. + */ @Override public Version getCurrentVersion() { // Does nothing. return null; } + /** + * Configuration mutations not logged (i.e. not persisted). As such, they are + * not persisted and not versioned. Hence, version is always compatible, + * since it is in-memory. + */ @Override public void checkVersion() { // Does nothing. (Version is always compatible since it's in memory) } + + @Override + public void close() throws IOException { + // Does nothing. + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java index 8f5dc6a273347..8350fa928d288 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java @@ -18,21 +18,17 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.server.resourcemanager.DBManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.YarnServerCommonProtos; import org.apache.hadoop.yarn.server.records.Version; -import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.fusesource.leveldbjni.JniDBFactory; -import org.fusesource.leveldbjni.internal.NativeDB; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBComparator; import org.iq80.leveldb.DBException; @@ -52,9 +48,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Timer; -import java.util.TimerTask; -import java.util.function.Consumer; import static org.fusesource.leveldbjni.JniDBFactory.bytes; @@ -72,6 +65,8 @@ public class 
LeveldbConfigurationStore extends YarnConfigurationStore { private static final String CONF_VERSION_NAME = "conf-version-store"; private static final String CONF_VERSION_KEY = "conf-version"; private DB db; + private DBManager dbManager; + private DBManager versionDbManager; private DB versionDb; private long maxLogs; private Configuration conf; @@ -79,23 +74,25 @@ public class LeveldbConfigurationStore extends YarnConfigurationStore { @VisibleForTesting protected static final Version CURRENT_VERSION_INFO = Version .newInstance(0, 1); - private long compactionIntervalMsec; @Override public void initialize(Configuration config, Configuration schedConf, RMContext rmContext) throws IOException { this.conf = config; this.initSchedConf = schedConf; + this.dbManager = new DBManager(); + this.versionDbManager = new DBManager(); try { initDatabase(); this.maxLogs = config.getLong( YarnConfiguration.RM_SCHEDCONF_MAX_LOGS, YarnConfiguration.DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS); - this.compactionIntervalMsec = config.getLong( + long compactionIntervalMsec = config.getLong( YarnConfiguration.RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS, YarnConfiguration .DEFAULT_RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS) * 1000; - startCompactionTimer(); + dbManager.startCompactionTimer(compactionIntervalMsec, + this.getClass().getSimpleName()); } catch (Exception e) { throw new IOException(e); } @@ -114,7 +111,7 @@ private void initDatabase() throws Exception { confOptions.createIfMissing(false); File confVersionFile = new File(confVersion.toString()); - versionDb = initDatabaseHelper(confVersionFile, confOptions, + versionDb = versionDbManager.initDatabase(confVersionFile, confOptions, this::initVersionDb); Path storeRoot = createStorageDir(DB_NAME); @@ -154,7 +151,7 @@ public byte[] findShortSuccessor(byte[] key) { }); LOG.info("Using conf database at {}", storeRoot); File dbFile = new File(storeRoot.toString()); - db = initDatabaseHelper(dbFile, options, this::initDb); + db = dbManager.initDatabase(dbFile, options, this::initDb); } private void initVersionDb(DB database) { @@ -170,30 +167,6 @@ private void initDb(DB database) { increaseConfigVersion(); } - private DB initDatabaseHelper(File configurationFile, Options options, - Consumer initMethod) throws Exception { - DB database; - try { - database = JniDBFactory.factory.open(configurationFile, options); - } catch (NativeDB.DBException e) { - if (e.isNotFound() || e.getMessage().contains(" does not exist ")) { - LOG.info("Creating configuration version/database at {}", - configurationFile); - options.createIfMissing(true); - try { - database = JniDBFactory.factory.open(configurationFile, options); - initMethod.accept(database); - } catch (DBException dbErr) { - throw new IOException(dbErr.getMessage(), dbErr); - } - } else { - throw e; - } - } - - return database; - } - private Path createStorageDir(String storageName) throws IOException { Path root = getStorageDir(storageName); FileSystem fs = FileSystem.getLocal(conf); @@ -212,12 +185,8 @@ private Path getStorageDir(String storageName) throws IOException { @Override public void close() throws IOException { - if (db != null) { - db.close(); - } - if (versionDb != null) { - versionDb.close(); - } + dbManager.close(); + versionDbManager.close(); } @Override @@ -313,28 +282,9 @@ public List getConfirmedConfHistory(long fromId) { return null; // unimplemented } - private void startCompactionTimer() { - if (compactionIntervalMsec > 0) { - Timer compactionTimer = new Timer( - this.getClass().getSimpleName() + 
" compaction timer", true); - compactionTimer.schedule(new CompactionTimerTask(), - compactionIntervalMsec, compactionIntervalMsec); - } - } - @Override public Version getConfStoreVersion() throws Exception { - Version version = null; - try { - byte[] data = db.get(bytes(VERSION_KEY)); - if (data != null) { - version = new VersionPBImpl(YarnServerCommonProtos.VersionProto - .parseFrom(data)); - } - } catch (DBException e) { - throw new IOException(e); - } - return version; + return dbManager.loadVersion(VERSION_KEY); } @VisibleForTesting @@ -350,37 +300,20 @@ protected DB getDB() { @Override public void storeVersion() throws Exception { - storeVersion(CURRENT_VERSION_INFO); - } - - @VisibleForTesting - protected void storeVersion(Version version) throws Exception { - byte[] data = ((VersionPBImpl) version).getProto() - .toByteArray(); try { - db.put(bytes(VERSION_KEY), data); + storeVersion(CURRENT_VERSION_INFO); } catch (DBException e) { throw new IOException(e); } } + @VisibleForTesting + protected void storeVersion(Version version) { + dbManager.storeVersion(VERSION_KEY, version); + } + @Override public Version getCurrentVersion() { return CURRENT_VERSION_INFO; } - - private class CompactionTimerTask extends TimerTask { - @Override - public void run() { - long start = Time.monotonicNow(); - LOG.info("Starting full compaction cycle"); - try { - db.compactRange(null, null); - } catch (DBException e) { - LOG.error("Error compacting database", e); - } - long duration = Time.monotonicNow() - start; - LOG.info("Full compaction cycle completed in {} msec", duration); - } - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java index 91bc47a9669a9..5f262be83d346 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java index ee53fd18b2b5c..ca8d582f82716 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java @@ -75,36 +75,68 @@ public boolean isMutationAllowed(UserGroupInformation user, for (QueueConfigInfo updateQueueInfo : confUpdate.getUpdateQueueInfo()) { queues.add(updateQueueInfo.getQueue()); } + + // Loop through all the queues. for (String queuePath : queues) { - String queueName = queuePath.lastIndexOf('.') != -1 ? - queuePath.substring(queuePath.lastIndexOf('.') + 1) : queuePath; QueueInfo queueInfo = null; - try { - queueInfo = rmContext.getScheduler() - .getQueueInfo(queueName, false, false); - } catch (IOException e) { - // Queue is not found, do nothing. - } String parentPath = queuePath; + + // For this queue, check if queue information exists for its children + // starting at the end of the queue. + // Keep this check going by moving up in the queue hierarchy until + // queue information has been found for one of its children. + String queueName; while (queueInfo == null) { - // We are adding a queue (whose parent we are possibly also adding). - // Check ACL of lowest parent queue which already exists. - parentPath = parentPath.substring(0, parentPath.lastIndexOf('.')); - String parentName = parentPath.lastIndexOf('.') != -1 ? - parentPath.substring(parentPath.lastIndexOf('.') + 1) : parentPath; + queueName = queueHasAChild(parentPath) ? + getLastChildForQueue(parentPath) : parentPath; try { queueInfo = rmContext.getScheduler() - .getQueueInfo(parentName, false, false); + .getQueueInfo(queueName, false, false); } catch (IOException e) { // Queue is not found, do nothing. } + + // Keep going up in the queue hierarchy. + parentPath = queueHasAChild(parentPath) ? + getQueueBeforeLastChild(parentPath) : parentPath; } + + // check if user has Admin access to this queue. Queue queue = ((MutableConfScheduler) rmContext.getScheduler()) .getQueue(queueInfo.getQueueName()); if (queue != null && !queue.hasAccess(QueueACL.ADMINISTER_QUEUE, user)) { return false; } } + return true; } + + /** + * Does the queue have a child? + * @param queue The queue that needs to be checked for a child. + * @return True if a "." exists in the queue name, signalling hierarchy. + */ + private boolean queueHasAChild(String queue) { + return queue.lastIndexOf('.') != -1; + } + + /** + * Get the last child name from a queue name. + * @param queue The queue that is checked for the last child. + * @return The last child of the queue. + */ + private String getLastChildForQueue(String queue) { + return queue.substring(queue.lastIndexOf('.') + 1); + } + + /** + * Get a queue name minus the last child. + * @param queue The queue that needs to be trimmed of its last child. + * @return Remaining queue name after its last child has been taken out. 
+ */ + private String getQueueBeforeLastChild(String queue) { + return queue.substring(0, queue.lastIndexOf('.')); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java index 8ab3e4437b264..4480bc34dcc48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java @@ -99,7 +99,7 @@ public abstract void initialize(Configuration conf, Configuration schedConf, * Closes the configuration store, releasing any required resources. * @throws IOException on failure to close */ - public void close() throws IOException {} + public abstract void close() throws IOException; /** * Logs the configuration change to backing store. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java index 8c7ed375ab1ef..7276d3d01433c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -31,6 +31,7 @@ import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.data.ACL; +import java.io.IOException; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.ObjectInputStream; @@ -316,4 +317,11 @@ private static Object deserializeObject(byte[] bytes) throws Exception { private static T unsafeCast(Object o) throws ClassCastException { return (T)o; } + + @Override + public void close() throws IOException { + if (zkManager != null) { + zkManager.close(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java index e684c2bf62006..d3e2f8981a819 100644 --- 
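
The rewritten ACL check above walks up the queue path until it reaches a queue the scheduler already knows about, using the three helpers just introduced. A quick illustration of how they decompose a hypothetical path (the queue name is invented for the example):

    // Illustrative only, not applied by this patch.
    String path = "root.a.b";
    boolean hasSeparator = queueHasAChild(path);       // true  ("." present)
    String leaf = getLastChildForQueue(path);          // "b"   (after the last ".")
    String remainder = getQueueBeforeLastChild(path);  // "root.a"
    // isMutationAllowed() therefore looks up "b", then "a", then "root" until
    // getQueueInfo() returns a queue, and checks ADMINISTER_QUEUE on that queue.
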
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/preemption/PreemptionManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/preemption/PreemptionManager.java index 58110c9b6914e..47ae2ada22e6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/preemption/PreemptionManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/preemption/PreemptionManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java index 7302ec5aa1c47..92af8e4e03875 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity .queuemanagement; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity .QueueManagementDynamicEditPolicy; import org.slf4j.Logger; diff --git 
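
Stepping back to the configuration-store API change a few hunks above: YarnConfigurationStore.close() is no longer an empty default but abstract, so every backing store must release its resources explicitly; the ZooKeeper store now closes its zkManager. A hedged sketch of what the matching override could look like for the LevelDB-backed store at the top of this section (field name assumed from that hunk):

    // Illustrative sketch only, not applied by this patch. Assumes the store
    // keeps an org.iq80.leveldb.DB handle named db.
    @Override
    public void close() throws IOException {
      if (db != null) {
        db.close();   // releases LevelDB file handles and the directory lock
      }
    }
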
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index f42351c85edd8..6507259f10cfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -89,7 +89,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Represents an application attempt from the viewpoint of the FIFO or Capacity @@ -112,6 +112,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt { private AbstractContainerAllocator containerAllocator; + private boolean runnable; + /** * to hold the message if its app doesn't not get container from a node */ @@ -139,6 +141,7 @@ public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId, RMContext rmContext, Priority appPriority, boolean isAttemptRecovering, ActivitiesManager activitiesManager) { super(applicationAttemptId, user, queue, abstractUsersManager, rmContext); + this.runnable = true; RMApp rmApp = rmContext.getRMApps().get(getApplicationId()); @@ -609,7 +612,7 @@ public boolean apply(Resource cluster, ResourceCommitRequest * Licensed under the Apache License Version 2.0. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java index 862674d45e76e..b3416074271c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java @@ -27,9 +27,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 4fe38d5b0c8b5..9f07bb8420e5d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -29,9 +29,9 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ReservationQueueConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ReservationQueueConfiguration.java index cf7f84e27abaa..ceabca42db27a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ReservationQueueConfiguration.java +++ 
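
The recurring import churn in these hunks is one mechanical change: direct Guava imports are replaced by the relocated copy Hadoop bundles under org.apache.hadoop.thirdparty, which insulates YARN from Guava version conflicts on the user classpath. The pattern, taking VisibleForTesting as the example:

    // Before: direct Guava dependency
    import com.google.common.annotations.VisibleForTesting;
    // After: shaded copy from the hadoop-thirdparty relocation
    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
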
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ReservationQueueConfiguration.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java index 161405b76562b..41478ec1d536d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileParser.java @@ -16,8 +16,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocation; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfigurationException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.ConfigurableResource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java index 72c6c6801b358..e89682d789f92 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java @@ -126,6 +126,7 @@ private void loadQueue(String parentName, Element element, NodeList fields = element.getChildNodes(); boolean isLeaf = true; boolean isReservable = false; + boolean isMaxAMShareSet = false; for (int j = 0; j < fields.getLength(); j++) { Node fieldNode = fields.item(j); @@ -157,6 +158,7 @@ private void loadQueue(String parentName, Element element, float val = Float.parseFloat(text); val = Math.min(val, 1.0f); builder.queueMaxAMShares(queueName, val); + isMaxAMShareSet = true; } else if 
(MAX_CONTAINER_ALLOCATION.equals(field.getTagName())) { String text = getTrimmedTextData(field); ConfigurableResource val = @@ -220,7 +222,6 @@ private void loadQueue(String parentName, Element element, isLeaf = false; } } - // if a leaf in the alloc file is marked as type='parent' // then store it as a parent queue if (isLeaf && !"parent".equals(element.getAttribute("type"))) { @@ -230,10 +231,11 @@ private void loadQueue(String parentName, Element element, } } else { if (isReservable) { - throw new AllocationConfigurationException("The configuration settings" - + " for " + queueName + " are invalid. A queue element that " - + "contains child queue elements or that has the type='parent' " - + "attribute cannot also include a reservation element."); + throw new AllocationConfigurationException( + getErrorString(queueName, RESERVATION)); + } else if (isMaxAMShareSet) { + throw new AllocationConfigurationException( + getErrorString(queueName, MAX_AMSHARE)); } builder.configuredQueues(FSQueueType.PARENT, queueName); } @@ -253,6 +255,19 @@ private void loadQueue(String parentName, Element element, builder.getMaxQueueResources(), queueName); } + /** + * Set up the error string based on the supplied parent queueName and element. + * @param parentQueueName the parent queue name. + * @param element the element that should not be present for the parent queue. + * @return the error string. + */ + private String getErrorString(String parentQueueName, String element) { + return "The configuration settings" + + " for " + parentQueueName + " are invalid. A queue element that " + + "contains child queue elements or that has the type='parent' " + + "attribute cannot also include a " + element + " element."; + } + private String getTrimmedTextData(Element element) { return ((Text) element.getFirstChild()).getData().trim(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java index 7fec0a80ad864..aae1d5547a8f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java @@ -22,6 +22,7 @@ public class ConversionOptions { private DryRunResultHolder dryRunResultHolder; private boolean dryRun; private boolean noTerminalRuleCheck; + private boolean enableAsyncScheduler; public ConversionOptions(DryRunResultHolder dryRunResultHolder, boolean dryRun) { @@ -41,6 +42,14 @@ public boolean isNoRuleTerminalCheck() { return noTerminalRuleCheck; } + public void setEnableAsyncScheduler(boolean enableAsyncScheduler) { + this.enableAsyncScheduler = enableAsyncScheduler; + } + + public boolean isEnableAsyncScheduler() { + return enableAsyncScheduler; + } + public void handleWarning(String msg, Logger log) { if (dryRun) { dryRunResultHolder.addDryRunWarning(msg); diff --git 
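
With the parser change above, a queue that turns out to be a parent (it has child queue elements or type='parent') is rejected not only when it carries a reservation element but also when it sets maxAMShare, and both paths share the new getErrorString() message. Roughly what the failure looks like (the queue name is invented, and MAX_AMSHARE is assumed to hold the maxAMShare tag name):

    // Illustrative only, not applied by this patch.
    throw new AllocationConfigurationException(
        getErrorString("root.parents", MAX_AMSHARE));
    // => "The configuration settings for root.parents are invalid. A queue
    //     element that contains child queue elements or that has the
    //     type='parent' attribute cannot also include a maxAMShare element."
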
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/DryRunResultHolder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/DryRunResultHolder.java index a570a12e02b9a..0bf2b95d8ba55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/DryRunResultHolder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/DryRunResultHolder.java @@ -24,7 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class DryRunResultHolder { private static final Logger LOG = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java index 5bd3b1a52ba81..b6730c73104bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigArgumentHandler.java @@ -29,7 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Parses arguments passed to the FS->CS converter. 
@@ -109,6 +109,9 @@ public enum CliOption { SKIP_VERIFICATION("skip verification", "s", "skip-verification", "Skips the verification of the converted configuration", false), + ENABLE_ASYNC_SCHEDULER("enable asynchronous scheduler", "a", "enable-async-scheduler", + "Enables the Asynchronous scheduler which decouples the CapacityScheduler" + + " scheduling from Node Heartbeats.", false), HELP("help", "h", "help", "Displays the list of options", false); private final String name; @@ -220,6 +223,8 @@ private FSConfigToCSConfigConverter prepareAndGetConverter( conversionOptions.setDryRun(dryRun); conversionOptions.setNoTerminalRuleCheck( cliParser.hasOption(CliOption.NO_TERMINAL_RULE_CHECK.shortSwitch)); + conversionOptions.setEnableAsyncScheduler( + cliParser.hasOption(CliOption.ENABLE_ASYNC_SCHEDULER.shortSwitch)); checkOptionPresent(cliParser, CliOption.YARN_SITE); checkOutputDefined(cliParser, dryRun); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java index e2281a8a845b7..2641680abbf4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigConverter.java @@ -59,7 +59,7 @@ import org.w3c.dom.Element; import org.w3c.dom.NodeList; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Converts Fair Scheduler configuration (site and fair-scheduler.xml) @@ -83,6 +83,9 @@ public class FSConfigToCSConfigConverter { private boolean preemptionEnabled = false; private int queueMaxAppsDefault; private float queueMaxAMShareDefault; + private Map userMaxApps; + private int userMaxAppsDefault; + private boolean autoCreateChildQueues = false; private boolean sizeBasedWeight = false; private boolean userAsDefaultQueue = false; @@ -99,6 +102,8 @@ public class FSConfigToCSConfigConverter { private boolean consoleMode = false; private boolean convertPlacementRules = false; + + public FSConfigToCSConfigConverter(FSConfigToCSConfigRuleHandler ruleHandler, ConversionOptions conversionOptions) { this.ruleHandler = ruleHandler; @@ -242,14 +247,13 @@ void convert(Configuration inputYarnSiteConfig) throws Exception { AllocationConfiguration allocConf = fs.getAllocationConfiguration(); queueMaxAppsDefault = allocConf.getQueueMaxAppsDefault(); + userMaxAppsDefault = allocConf.getUserMaxAppsDefault(); + userMaxApps = allocConf.getUserMaxApps(); queueMaxAMShareDefault = allocConf.getQueueMaxAMShareDefault(); convertedYarnSiteConfig = new Configuration(false); capacitySchedulerConfig = new Configuration(false); - checkUserMaxApps(allocConf); - checkUserMaxAppsDefault(allocConf); - convertYarnSiteXml(inputYarnSiteConfig, havePlacementPolicies); convertCapacitySchedulerXml(fs); @@ -270,7 +274,8 @@ private void convertYarnSiteXml(Configuration inputYarnSiteConfig, FSYarnSiteConverter siteConverter = new 
FSYarnSiteConverter(); siteConverter.convertSiteProperties(inputYarnSiteConfig, - convertedYarnSiteConfig, drfUsed); + convertedYarnSiteConfig, drfUsed, + conversionOptions.isEnableAsyncScheduler()); // See docs: "allow-undeclared-pools" and "user-as-default-queue" are // ignored if we have placement rules @@ -286,7 +291,9 @@ private void convertYarnSiteXml(Configuration inputYarnSiteConfig, private void convertCapacitySchedulerXml(FairScheduler fs) { FSParentQueue rootQueue = fs.getQueueManager().getRootQueue(); - emitDefaultMaxApplications(); + emitDefaultQueueMaxParallelApplications(); + emitDefaultUserMaxParallelApplications(); + emitUserMaxParallelApplications(); emitDefaultMaxAMShare(); FSQueueConverter queueConverter = FSQueueConverterBuilder.create() @@ -321,14 +328,30 @@ private void convertCapacitySchedulerXml(FairScheduler fs) { } } - private void emitDefaultMaxApplications() { + private void emitDefaultQueueMaxParallelApplications() { if (queueMaxAppsDefault != Integer.MAX_VALUE) { capacitySchedulerConfig.set( - CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, + PREFIX + "max-parallel-apps", String.valueOf(queueMaxAppsDefault)); } } + private void emitDefaultUserMaxParallelApplications() { + if (userMaxAppsDefault != Integer.MAX_VALUE) { + capacitySchedulerConfig.set( + PREFIX + "user.max-parallel-apps", + String.valueOf(userMaxAppsDefault)); + } + } + + private void emitUserMaxParallelApplications() { + userMaxApps + .forEach((user, apps) -> { + capacitySchedulerConfig.setInt( + PREFIX + "user." + user + ".max-parallel-apps", apps); + }); + } + private void emitDefaultMaxAMShare() { if (queueMaxAMShareDefault == QUEUE_MAX_AM_SHARE_DISABLED) { capacitySchedulerConfig.setFloat( @@ -373,19 +396,6 @@ private void checkReservationSystem(Configuration conf) { } } - private void checkUserMaxApps(AllocationConfiguration allocConf) { - if (allocConf.getUserMaxApps() != null - && allocConf.getUserMaxApps().size() > 0) { - ruleHandler.handleUserMaxApps(); - } - } - - private void checkUserMaxAppsDefault(AllocationConfiguration allocConf) { - if (allocConf.getUserMaxAppsDefault() > 0) { - ruleHandler.handleUserMaxAppsDefault(); - } - } - private boolean isDrfUsed(FairScheduler fs) { FSQueue rootQueue = fs.getQueueManager().getRootQueue(); AllocationConfiguration allocConf = fs.getAllocationConfiguration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java index 3a2d363ca1df5..e7a6138b8c279 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSConfigToCSConfigRuleHandler.java @@ -28,7 +28,7 @@ import org.apache.commons.lang3.StringUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration; 
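
The converter previously mapped queueMaxAppsDefault onto CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS and merely warned about per-user limits; it now emits Capacity Scheduler max-parallel-apps properties for the queue default, the user default and each named user. Assuming PREFIX is the usual yarn.scheduler.capacity. prefix, the generated keys look like this (user name, queue name and values invented for illustration; the per-queue form is emitted by FSQueueConverter below):

    // Illustrative only, not applied by this patch. Uses org.apache.hadoop.conf.Configuration.
    Configuration csConf = new Configuration(false);
    // <queueMaxAppsDefault>                -> default per-queue limit
    csConf.set("yarn.scheduler.capacity.max-parallel-apps", "50");
    // <userMaxAppsDefault>                 -> default per-user limit
    csConf.set("yarn.scheduler.capacity.user.max-parallel-apps", "10");
    // <user name="alice"><maxRunningApps>  -> per-user limit
    csConf.set("yarn.scheduler.capacity.user.alice.max-parallel-apps", "5");
    // per-queue <maxRunningApps>           -> per-queue limit
    csConf.set("yarn.scheduler.capacity.root.queueA.max-parallel-apps", "20");
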
import org.slf4j.Logger; @@ -170,14 +170,6 @@ public void handleChildQueueCount(String queue, int count) { } } - public void handleUserMaxApps() { - handle(USER_MAX_RUNNING_APPS, "", null); - } - - public void handleUserMaxAppsDefault() { - handle(USER_MAX_APPS_DEFAULT, "", null); - } - public void handleDynamicMaxAssign() { handle(DYNAMIC_MAX_ASSIGN, FairSchedulerConfiguration.DYNAMIC_MAX_ASSIGN, null); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java index a84b6cbe0d1a8..cc52309b47bcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSQueueConverter.java @@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.ConfigurableResource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; @@ -42,7 +43,7 @@ */ public class FSQueueConverter { public static final float QUEUE_MAX_AM_SHARE_DISABLED = -1.0f; - private static final int MAX_RUNNING_APPS_UNSET = Integer.MIN_VALUE; + private static final int MAX_RUNNING_APPS_UNSET = Integer.MAX_VALUE; private static final String FAIR_POLICY = "fair"; private static final String FIFO_POLICY = "fifo"; @@ -78,7 +79,7 @@ public void convertQueueHierarchy(FSQueue queue) { emitChildQueues(queueName, children); emitMaxAMShare(queueName, queue); - emitMaxRunningApps(queueName, queue); + emitMaxParallelApps(queueName, queue); emitMaxAllocations(queueName, queue); emitPreemptionDisabled(queueName, queue); @@ -137,14 +138,14 @@ private void emitMaxAMShare(String queueName, FSQueue queue) { /** * <maxRunningApps> - * ==> yarn.scheduler.capacity.<queue-name>.maximum-applications. + * ==> yarn.scheduler.capacity.<queue-name>.max-parallel-apps. 
* @param queueName * @param queue */ - private void emitMaxRunningApps(String queueName, FSQueue queue) { + private void emitMaxParallelApps(String queueName, FSQueue queue) { if (queue.getMaxRunningApps() != MAX_RUNNING_APPS_UNSET && queue.getMaxRunningApps() != queueMaxAppsDefault) { - capacitySchedulerConfig.set(PREFIX + queueName + ".maximum-applications", + capacitySchedulerConfig.set(PREFIX + queueName + ".max-parallel-apps", String.valueOf(queue.getMaxRunningApps())); } } @@ -225,7 +226,8 @@ private void emitPreemptionDisabled(String queueName, FSQueue queue) { * @param queueName */ private void emitAutoCreateChildQueue(String queueName, FSQueue queue) { - if (autoCreateChildQueues && !queue.getChildQueues().isEmpty()) { + if (autoCreateChildQueues && !queue.getChildQueues().isEmpty() + && !queueName.equals(CapacitySchedulerConfiguration.ROOT)) { capacitySchedulerConfig.setBoolean(PREFIX + queueName + ".auto-create-child-queue.enabled", true); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java index 25db4d0c6b6ec..4a5245befa2a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/FSYarnSiteConverter.java @@ -38,7 +38,7 @@ public class FSYarnSiteConverter { @SuppressWarnings({"deprecation", "checkstyle:linelength"}) public void convertSiteProperties(Configuration conf, - Configuration yarnSiteConfig, boolean drfUsed) { + Configuration yarnSiteConfig, boolean drfUsed, boolean enableAsyncScheduler) { yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName()); @@ -146,6 +146,10 @@ public void convertSiteProperties(Configuration conf, CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, DominantResourceCalculator.class.getCanonicalName()); } + + if (enableAsyncScheduler) { + yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true); + } } public boolean isPreemptionEnabled() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java index e82034140815e..83af8b64945b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java @@ -22,7 +22,7 @@ import java.util.Collection; import java.util.Comparator; 
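
Tying the new -a/--enable-async-scheduler switch together: the argument handler records it on ConversionOptions, and FSYarnSiteConverter then turns on the Capacity Scheduler's asynchronous scheduling (decoupling scheduling from node heartbeats) in the generated yarn-site.xml. Assuming SCHEDULE_ASYNCHRONOUSLY_ENABLE resolves to the property below, passing the flag amounts to:

    // Illustrative only, not applied by this patch; the property string is assumed.
    yarnSiteConfig.setBoolean(
        "yarn.scheduler.capacity.schedule-asynchronously.enable", true);
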
-import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java index 0b412d3ee003f..7768b3c5c6c42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java @@ -34,7 +34,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Makes scheduling decisions by trying to equalize shares of memory. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java index 365340151fcd5..bcb4a45a7005f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java @@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @Private @Unstable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java index 05a5d7bad0286..fb5850d61b553 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java @@ -83,7 +83,7 @@ public RMContainer allocate(NodeType type, FiCaSchedulerNode node, // Update consumption and track allocations 
ContainerRequest containerRequest = appSchedulingInfo.allocate( - type, node, schedulerKey, container); + type, node, schedulerKey, rmContainer); attemptResourceUsage.incUsed(node.getPartition(), container.getResource()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 9bd2a111cc74b..9ec4822e513fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java index a757ea527afff..f9fcdfdd5312d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java @@ -41,7 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Common node sorting class which will do sorting based on policy spec. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SimpleCandidateNodeSet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SimpleCandidateNodeSet.java index 31a21705d91ab..b120a320bbfae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SimpleCandidateNodeSet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SimpleCandidateNodeSet.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java index 9898051bd9bc2..450367c0fb092 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.collections.IteratorUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.DiagnosticsCollector; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java index 9e66582e04f1b..863d8e228b4de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java @@ -21,7 +21,7 @@ import java.util.*; import java.util.concurrent.ConcurrentSkipListSet; -import com.google.common.annotations.VisibleForTesting; +import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java index 7db70e6823ff5..aed0dbf5e1173 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java @@ -44,7 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.security.MasterKeyData; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * AMRM-tokens are per ApplicationAttempt. If users redistribute their diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java index 4047bd5731a70..09b6cec6fd56e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java @@ -22,7 +22,7 @@ import java.util.Map; import javax.crypto.SecretKey; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index fd8935debbcaf..b4da08f52fea9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -26,7 +26,6 @@ import java.util.Collection; import java.util.Collections; import 
java.util.Date; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -74,8 +73,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Service to renew application delegation tokens. */ @@ -115,6 +114,7 @@ public class DelegationTokenRenewer extends AbstractService { private volatile boolean isServiceStarted; private LinkedBlockingQueue pendingEventQueue; + private boolean alwaysCancelDelegationTokens; private boolean tokenKeepAliveEnabled; private boolean hasProxyUserPrivileges; private long credentialsValidTimeRemaining; @@ -122,7 +122,7 @@ public class DelegationTokenRenewer extends AbstractService { private long tokenRenewerThreadRetryInterval; private int tokenRenewerThreadRetryMaxAttempts; private final Map> futures = - new HashMap<>(); + new ConcurrentHashMap<>(); private boolean delegationTokenRenewerPoolTrackerFlag = true; // this config is supposedly not used by end-users. @@ -137,6 +137,9 @@ public DelegationTokenRenewer() { @Override protected void serviceInit(Configuration conf) throws Exception { + this.alwaysCancelDelegationTokens = + conf.getBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, + YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_ALWAYS_CANCEL); this.hasProxyUserPrivileges = conf.getBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, YarnConfiguration.DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED); @@ -268,7 +271,7 @@ protected void serviceStop() { * */ @VisibleForTesting - protected static class DelegationTokenToRenew { + protected class DelegationTokenToRenew { public final Token token; public final Collection referringAppIds; public final Configuration conf; @@ -298,7 +301,7 @@ public DelegationTokenToRenew(Collection applicationIds, this.conf = conf; this.expirationDate = expirationDate; this.timerTask = null; - this.shouldCancelAtEnd = shouldCancelAtEnd; + this.shouldCancelAtEnd = shouldCancelAtEnd | alwaysCancelDelegationTokens; } public void setTimerTask(RenewalTimerTask tTask) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/NMTokenSecretManagerInRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/NMTokenSecretManagerInRM.java index f702c71448c85..c478143fcd5fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/NMTokenSecretManagerInRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/NMTokenSecretManagerInRM.java @@ -38,7 +38,7 @@ import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; import org.apache.hadoop.yarn.server.security.MasterKeyData; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; public class NMTokenSecretManagerInRM 
extends BaseNMTokenSecretManager { @@ -197,8 +197,8 @@ public NMToken createAndGetNMToken(String applicationSubmitter, NMToken nmToken = null; if (nodeSet != null) { if (!nodeSet.contains(container.getNodeId())) { - LOG.info("Sending NMToken for nodeId : " + container.getNodeId() - + " for container : " + container.getId()); + LOG.debug("Sending NMToken for nodeId : {} for container : {}", + container.getNodeId(), container.getId()); Token token = createNMToken(container.getId().getApplicationAttemptId(), container.getNodeId(), applicationSubmitter); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java index f13608c0ac6b3..b4e9563de4dce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.security; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java index 3ebc395deb1c4..88dc5c21e4fd4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java @@ -22,7 +22,7 @@ import java.util.Timer; import java.util.TimerTask; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java index d13e8a115b499..5db9f592d17fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java +++ 
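
Returning to the DelegationTokenRenewer hunks above: a new RM-side switch forces delegation tokens to be cancelled once an application finishes, even if the submitter asked for them to be kept alive (DelegationTokenToRenew becomes a non-static inner class so it can read the flag, and the futures map becomes a ConcurrentHashMap). A hedged example of turning it on:

    // Illustrative only, not applied by this patch. Uses the constant added in
    // the hunk above; its default keeps the previous behaviour.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, true);
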
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java @@ -38,7 +38,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * A ResourceManager specific delegation token secret manager. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java index 2d682c7255981..f36046c2664f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.volume.csi; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/lifecycle/VolumeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/lifecycle/VolumeImpl.java index 186742b107fdf..5902cc5cf8323 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/lifecycle/VolumeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/lifecycle/VolumeImpl.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.volume.csi.lifecycle; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.CsiAdaptorProtocol; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java index 38a43d576a7c9..4d071a68038f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java index f6eb2adf726c6..a31434b3adc6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; +import com.google.inject.Inject; import com.google.inject.Singleton; import com.sun.jersey.api.json.JSONConfiguration; import com.sun.jersey.api.json.JSONJAXBContext; @@ -28,6 +29,10 @@ import javax.ws.rs.ext.Provider; import javax.xml.bind.JAXBContext; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*; import org.apache.hadoop.yarn.webapp.RemoteExceptionData; @@ -36,9 +41,17 @@ @Provider public class JAXBContextResolver implements ContextResolver { + private static final Log LOG = + LogFactory.getLog(JAXBContextResolver.class.getName()); + private final Map typesContextMap; public JAXBContextResolver() throws Exception { + this(new Configuration()); + } + + @Inject + public JAXBContextResolver(Configuration conf) throws Exception { JAXBContext context; JAXBContext unWrappedRootContext; @@ -65,17 +78,54 @@ public JAXBContextResolver() throws Exception { DelegationToken.class, AppQueue.class, AppPriority.class, ResourceOptionInfo.class }; + ArrayList finalcTypesList = new ArrayList<>(); + ArrayList finalRootUnwrappedTypesList = new ArrayList<>(); + + Collections.addAll(finalcTypesList, cTypes); + Collections.addAll(finalRootUnwrappedTypesList, rootUnwrappedTypes); + + // Add Custom DAO Classes + Class[] daoClasses = null; + Class[] unwrappedDaoClasses = null; + boolean loadCustom = true; + try { + daoClasses = conf + 
.getClasses(YarnConfiguration.YARN_HTTP_WEBAPP_CUSTOM_DAO_CLASSES); + unwrappedDaoClasses = conf.getClasses( + YarnConfiguration.YARN_HTTP_WEBAPP_CUSTOM_UNWRAPPED_DAO_CLASSES); + } catch (Exception e) { + LOG.warn("Failed to load custom dao class: " + e); + loadCustom = false; + } + + if (loadCustom) { + if (daoClasses != null) { + Collections.addAll(finalcTypesList, daoClasses); + LOG.debug("Added custom dao classes: " + Arrays.toString(daoClasses)); + } + if (unwrappedDaoClasses != null) { + Collections.addAll(finalRootUnwrappedTypesList, unwrappedDaoClasses); + LOG.debug("Added custom Unwrapped dao classes: " + + Arrays.toString(unwrappedDaoClasses)); + } + } + + final Class[] finalcTypes = finalcTypesList + .toArray(new Class[finalcTypesList.size()]); + final Class[] finalRootUnwrappedTypes = finalRootUnwrappedTypesList + .toArray(new Class[finalRootUnwrappedTypesList.size()]); + this.typesContextMap = new HashMap(); context = new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false) - .build(), cTypes); + .build(), finalcTypes); unWrappedRootContext = new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(true) - .build(), rootUnwrappedTypes); - for (Class type : cTypes) { + .build(), finalRootUnwrappedTypes); + for (Class type : finalcTypes) { typesContextMap.put(type, context); } - for (Class type : rootUnwrappedTypes) { + for (Class type : finalRootUnwrappedTypes) { typesContextMap.put(type, unWrappedRootContext); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index 806b63640990b..009a012e3bcd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; @@ -26,6 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo; import org.apache.hadoop.yarn.util.resource.ResourceUtils; +import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -60,7 +62,37 @@ protected void render(Block html) { ClusterMetricsInfo clusterMetrics = new ClusterMetricsInfo(this.rm); DIV div = html.div().$class("metrics"); - + + Resource usedResources; + Resource totalResources; + Resource reservedResources; + int allocatedContainers; + if (clusterMetrics.getCrossPartitionMetricsAvailable()) { + allocatedContainers = + clusterMetrics.getTotalAllocatedContainersAcrossPartition(); + usedResources = + clusterMetrics.getTotalUsedResourcesAcrossPartition().getResource(); + totalResources = 
+ clusterMetrics.getTotalClusterResourcesAcrossPartition() + .getResource(); + reservedResources = + clusterMetrics.getTotalReservedResourcesAcrossPartition() + .getResource(); + // getTotalUsedResourcesAcrossPartition includes reserved resources. + Resources.subtractFrom(usedResources, reservedResources); + } else { + allocatedContainers = clusterMetrics.getContainersAllocated(); + usedResources = Resource.newInstance( + clusterMetrics.getAllocatedMB(), + (int) clusterMetrics.getAllocatedVirtualCores()); + totalResources = Resource.newInstance( + clusterMetrics.getTotalMB(), + (int) clusterMetrics.getTotalVirtualCores()); + reservedResources = Resource.newInstance( + clusterMetrics.getReservedMB(), + (int) clusterMetrics.getReservedVirtualCores()); + } + div.h3("Cluster Metrics"). table("#metricsoverview"). thead().$class("ui-widget-header"). @@ -70,12 +102,11 @@ protected void render(Block html) { th().$class("ui-state-default").__("Apps Running").__(). th().$class("ui-state-default").__("Apps Completed").__(). th().$class("ui-state-default").__("Containers Running").__(). - th().$class("ui-state-default").__("Memory Used").__(). - th().$class("ui-state-default").__("Memory Total").__(). - th().$class("ui-state-default").__("Memory Reserved").__(). - th().$class("ui-state-default").__("VCores Used").__(). - th().$class("ui-state-default").__("VCores Total").__(). - th().$class("ui-state-default").__("VCores Reserved").__(). + th().$class("ui-state-default").__("Used Resources").__(). + th().$class("ui-state-default").__("Total Resources").__(). + th().$class("ui-state-default").__("Reserved Resources").__(). + th().$class("ui-state-default").__("Physical Mem Used %").__(). + th().$class("ui-state-default").__("Physical VCores Used %").__(). __(). __(). tbody().$class("ui-widget-content"). @@ -89,13 +120,12 @@ protected void render(Block html) { clusterMetrics.getAppsFailed() + clusterMetrics.getAppsKilled() ) ). - td(String.valueOf(clusterMetrics.getContainersAllocated())). - td(StringUtils.byteDesc(clusterMetrics.getAllocatedMB() * BYTES_IN_MB)). - td(StringUtils.byteDesc(clusterMetrics.getTotalMB() * BYTES_IN_MB)). - td(StringUtils.byteDesc(clusterMetrics.getReservedMB() * BYTES_IN_MB)). - td(String.valueOf(clusterMetrics.getAllocatedVirtualCores())). - td(String.valueOf(clusterMetrics.getTotalVirtualCores())). - td(String.valueOf(clusterMetrics.getReservedVirtualCores())). + td(String.valueOf(allocatedContainers)). + td(usedResources.getFormattedString()). + td(totalResources.getFormattedString()). + td(reservedResources.getFormattedString()). + td(String.valueOf(clusterMetrics.getUtilizedMBPercent())). + td(String.valueOf(clusterMetrics.getUtilizedVirtualCoresPercent())). __(). 
__().__(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index 8e7974043ed9f..8e8a2610f0ce7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -22,6 +22,7 @@ import org.apache.commons.text.StringEscapeUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -30,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.util.Times; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; @@ -85,15 +87,23 @@ protected void render(Block html) { .th(".allocationTags", "Allocation Tags") .th(".mem", "Mem Used") .th(".mem", "Mem Avail") + .th(".mem", "Phys Mem Used %") .th(".vcores", "VCores Used") - .th(".vcores", "VCores Avail"); + .th(".vcores", "VCores Avail") + .th(".vcores", "Phys VCores Used %") + .th(".gpus", "GPUs Used") + .th(".gpus", "GPUs Avail"); } else { trbody.th(".containers", "Running Containers (G)") .th(".allocationTags", "Allocation Tags") .th(".mem", "Mem Used (G)") .th(".mem", "Mem Avail (G)") + .th(".mem", "Phys Mem Used %") .th(".vcores", "VCores Used (G)") .th(".vcores", "VCores Avail (G)") + .th(".vcores", "Phys VCores Used %") + .th(".gpus", "GPUs Used (G)") + .th(".gpus", "GPUs Avail (G)") .th(".containers", "Running Containers (O)") .th(".mem", "Mem Used (O)") .th(".vcores", "VCores Used (O)") @@ -165,6 +175,17 @@ protected void render(Block html) { nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\""); } + Integer gpuIndex = ResourceUtils.getResourceTypeIndex() + .get(ResourceInformation.GPU_URI); + long usedGPUs = 0; + long availableGPUs = 0; + if (gpuIndex != null && info.getUsedResource() != null + && info.getAvailableResource() != null) { + usedGPUs = info.getUsedResource().getResource() + .getResourceValue(ResourceInformation.GPU_URI); + availableGPUs = info.getAvailableResource().getResource() + .getResourceValue(ResourceInformation.GPU_URI); + } nodeTableData.append("
    ") .append(Times.format(info.getLastHealthUpdate())).append("\",\"") @@ -176,9 +197,18 @@ protected void render(Block html) { .append("\",\"").append("
    ") .append(StringUtils.byteDesc(availableMemory * BYTES_IN_MB)) - .append("\",\"").append(String.valueOf(info.getUsedVirtualCores())) + .append("\",\"") + .append(String.valueOf((int) info.getMemUtilization())) + .append("\",\"") + .append(String.valueOf(info.getUsedVirtualCores())) .append("\",\"") .append(String.valueOf(info.getAvailableVirtualCores())) + .append("\",\"") + .append(String.valueOf((int) info.getVcoreUtilization())) + .append("\",\"") + .append(String.valueOf(usedGPUs)) + .append("\",\"") + .append(String.valueOf(availableGPUs)) .append("\",\""); // If opportunistic containers are enabled, add extra fields. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java index e8da0cf9ea94e..c90d8ce5dc7dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java @@ -69,8 +69,10 @@ public class RMAppsBlock extends AppsBlock { new ColumnHeader(".runningcontainer", "Running Containers"), new ColumnHeader(".allocatedCpu", "Allocated CPU VCores"), new ColumnHeader(".allocatedMemory", "Allocated Memory MB"), + new ColumnHeader(".allocatedGpu", "Allocated GPUs"), new ColumnHeader(".reservedCpu", "Reserved CPU VCores"), new ColumnHeader(".reservedMemory", "Reserved Memory MB"), + new ColumnHeader(".reservedGpu", "Reserved GPUs"), new ColumnHeader(".queuePercentage", "% of Queue"), new ColumnHeader(".clusterPercentage", "% of Cluster"), new ColumnHeader(".progress", "Progress"), @@ -119,6 +121,7 @@ protected void renderData(Block html) { String blacklistedNodesCount = "N/A"; RMApp rmApp = rm.getRMContext().getRMApps() .get(appAttemptId.getApplicationId()); + boolean isAppInCompletedState = false; if (rmApp != null) { RMAppAttempt appAttempt = rmApp.getRMAppAttempt(appAttemptId); Set nodes = @@ -126,6 +129,7 @@ protected void renderData(Block html) { if (nodes != null) { blacklistedNodesCount = String.valueOf(nodes.size()); } + isAppInCompletedState = rmApp.isAppInCompletedStates(); } String percent = StringUtils.format("%.1f", app.getProgress()); appsTableData @@ -171,12 +175,18 @@ protected void renderData(Block html) { .append(app.getAllocatedMemoryMB() == -1 ? "N/A" : String.valueOf(app.getAllocatedMemoryMB())) .append("\",\"") + .append((isAppInCompletedState && app.getAllocatedGpus() <= 0) + ? UNAVAILABLE : String.valueOf(app.getAllocatedGpus())) + .append("\",\"") .append(app.getReservedCpuVcores() == -1 ? "N/A" : String .valueOf(app.getReservedCpuVcores())) .append("\",\"") .append(app.getReservedMemoryMB() == -1 ? "N/A" : String.valueOf(app.getReservedMemoryMB())) .append("\",\"") + .append((isAppInCompletedState && app.getReservedGpus() <= 0) + ? 
UNAVAILABLE : String.valueOf(app.getReservedGpus())) + .append("\",\"") .append(queuePercent) .append("\",\"") .append(clusterPercent) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java index 316e7ed51959d..5075d2505635c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java @@ -55,6 +55,7 @@ public void setup() { bind(RMWebServices.class); bind(GenericExceptionHandler.class); bind(RMWebApp.class).toInstance(this); + bindExternalClasses(); if (rm != null) { bind(ResourceManager.class).toInstance(rm); @@ -97,6 +98,16 @@ public String getRedirectPath() { return super.getRedirectPath(); } + private void bindExternalClasses() { + YarnConfiguration yarnConf = new YarnConfiguration(rm.getConfig()); + Class[] externalClasses = yarnConf + .getClasses(YarnConfiguration.YARN_HTTP_WEBAPP_EXTERNAL_CLASSES); + for (Class c : externalClasses) { + bind(c); + } + } + + private String buildRedirectPath() { // make a copy of the original configuration so not to mutate it. Also use // an YarnConfiguration to force loading of yarn-site.xml. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java index d197be42769ba..ce0f90ab4dd93 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java @@ -52,7 +52,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Injector; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index dfdaba9e59a49..5bc6bf7d0a4bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -217,7 +217,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.inject.Inject; import com.google.inject.Singleton; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java index a291e0548dbc6..e511d1122e021 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java @@ -21,13 +21,17 @@ import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.YarnWebParams.QUEUE_NAME; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.webapp.Controller; +import org.apache.hadoop.yarn.webapp.View; import org.apache.hadoop.yarn.webapp.YarnWebParams; import com.google.inject.Inject; @@ -92,9 +96,52 @@ public void scheduler() { render(FairSchedulerPage.class); return; } - - setTitle("Default Scheduler"); - render(DefaultSchedulerPage.class); + + if (rs instanceof FifoScheduler) { + setTitle("FIFO Scheduler"); + render(DefaultSchedulerPage.class); + return; + } + + renderOtherPluginScheduler(rm); + } + + private void renderOtherPluginScheduler(ResourceManager rm) { + ResourceScheduler rs = rm.getResourceScheduler(); + String schedulerName = rs.getClass().getSimpleName(); + + Class cls = PluginSchedulerPageHelper + .getPageClass(rm.getConfig()); + if (cls != null) { + setTitle(schedulerName); + render(cls); + } else { + LOG.warn( + "Render default scheduler page as scheduler page configured doesn't exist"); + setTitle("Default Scheduler"); + render(DefaultSchedulerPage.class); + } + } + + static class PluginSchedulerPageHelper { + private static boolean hasLoaded = false; + private static Class pageClass = null; + public static Class getPageClass(Configuration conf) { + if (!hasLoaded) { + loadPluginSchedulerPageClass(conf); + hasLoaded = true; + } + return pageClass; + } + + private static void loadPluginSchedulerPageClass(Configuration conf) { + Class configuredClass = conf + .getClass(YarnConfiguration.YARN_HTTP_WEBAPP_SCHEDULER_PAGE, null); + if (!View.class.isAssignableFrom(configuredClass)) { + return; + } + pageClass = (Class) configuredClass; + } } public void queue() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java index 668f33bcce4e3..3b2da51bcb4a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivitiesInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java index 257147743f368..3fa601d39c8ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ActivityNodeInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; -import com.google.common.base.Strings; +import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityNode; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java index 16d75e3bffc14..3b181dd9ed210 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppActivitiesInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index a1d701a41436e..0d9a9a9b7dd20 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -50,8 +50,8 @@ import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; @XmlRootElement(name = "app") @XmlAccessorType(XmlAccessType.FIELD) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java index f12f83d94bced..4de34e4c324ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppRequestAllocationInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityNode; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index f4d4070a89c5d..e38ab0d0b428d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -42,6 +42,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.PlanQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity. + CapacitySchedulerConfiguration.RESOURCE_PATTERN; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity. 
+ CapacitySchedulerConfiguration.CAPACITY; + @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) @XmlSeeAlso({CapacitySchedulerLeafQueueInfo.class}) @@ -61,6 +66,7 @@ public class CapacitySchedulerQueueInfo { protected float absoluteUsedCapacity; protected int numApplications; protected String queueName; + protected boolean isAbsoluteResource; protected QueueState state; protected CapacitySchedulerQueueInfoList queues; protected ResourceInfo resourcesUsed; @@ -77,6 +83,8 @@ public class CapacitySchedulerQueueInfo { protected QueueAclsInfo queueAcls; protected int queuePriority; protected String orderingPolicyInfo; + protected boolean autoCreateChildQueueEnabled; + protected LeafQueueTemplateInfo leafQueueTemplate; CapacitySchedulerQueueInfo() { }; @@ -151,6 +159,15 @@ public class CapacitySchedulerQueueInfo { orderingPolicyInfo = ((ParentQueue) q).getQueueOrderingPolicy() .getConfigName(); } + + String configuredCapacity = conf.get( + CapacitySchedulerConfiguration.getQueuePrefix(queuePath) + CAPACITY); + isAbsoluteResource = (configuredCapacity != null) + && RESOURCE_PATTERN.matcher(configuredCapacity).find(); + + autoCreateChildQueueEnabled = conf. + isAutoCreateChildQueueEnabled(queuePath); + leafQueueTemplate = new LeafQueueTemplateInfo(conf, queuePath); } protected void populateQueueResourceUsage(ResourceUsage queueResourceUsage) { @@ -203,6 +220,10 @@ public long getPendingContainers() { return pendingContainers; } + public boolean isAbsoluteResource() { + return isAbsoluteResource; + } + public String getQueueName() { return this.queueName; } @@ -276,4 +297,12 @@ public String getOrderingPolicyInfo() { public boolean isLeafQueue() { return getQueues() == null; } + + public boolean isAutoCreateChildQueueEnabled() { + return autoCreateChildQueueEnabled; + } + + public LeafQueueTemplateInfo getLeafQueueTemplate() { + return leafQueueTemplate; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index d6e4828fbc157..b66c4d997a79e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue; @XmlRootElement(name = "clusterMetrics") @XmlAccessorType(XmlAccessType.FIELD) @@ -54,6 +55,8 @@ public class ClusterMetricsInfo { private long totalMB; private long totalVirtualCores; + private int utilizedMBPercent; + private int utilizedVirtualCoresPercent; private int totalNodes; private int lostNodes; private int unhealthyNodes; @@ -69,6 +72,14 @@ public class ClusterMetricsInfo { // Total registered resources of the cluster, including all partitions private ResourceInfo totalClusterResourcesAcrossPartition; + 
// Total reserved resources of the cluster, including all partitions. + private ResourceInfo totalReservedResourcesAcrossPartition; + + // Total allocated containers across all partitions. + private int totalAllocatedContainersAcrossPartition; + + private boolean crossPartitionMetricsAvailable = false; + public ClusterMetricsInfo() { } // JAXB needs this @@ -115,11 +126,24 @@ public ClusterMetricsInfo(final ResourceScheduler rs) { cs.getRootQueue().getQueueResourceUsage().getAllUsed()); totalClusterResourcesAcrossPartition = new ResourceInfo( cs.getClusterResource()); + totalReservedResourcesAcrossPartition = new ResourceInfo( + cs.getRootQueue().getQueueResourceUsage().getAllReserved()); + totalAllocatedContainersAcrossPartition = + ((ParentQueue) cs.getRootQueue()).getNumContainers(); + crossPartitionMetricsAvailable = true; } } else { this.totalMB = availableMB + allocatedMB; this.totalVirtualCores = availableVirtualCores + allocatedVirtualCores; } + long baseMem = this.totalMB; + this.utilizedMBPercent = baseMem <= 0 ? 0 : + (int) (clusterMetrics.getUtilizedMB() * 100 / baseMem); + long baseCores = this.totalVirtualCores; + this.utilizedVirtualCoresPercent = baseCores <= 0 ? 0 : + (int) (clusterMetrics.getUtilizedVirtualCores() * 100 / + baseCores); + this.activeNodes = clusterMetrics.getNumActiveNMs(); this.lostNodes = clusterMetrics.getNumLostNMs(); this.unhealthyNodes = clusterMetrics.getUnhealthyNMs(); @@ -239,6 +263,14 @@ public int getShutdownNodes() { return this.shutdownNodes; } + public int getUtilizedMBPercent() { + return utilizedMBPercent; + } + + public int getUtilizedVirtualCoresPercent() { + return utilizedVirtualCoresPercent; + } + public void setContainersReserved(int containersReserved) { this.containersReserved = containersReserved; } @@ -343,7 +375,27 @@ public ResourceInfo getTotalUsedResourcesAcrossPartition() { return totalUsedResourcesAcrossPartition; } + public void setUtilizedMBPercent(int utilizedMBPercent) { + this.utilizedMBPercent = utilizedMBPercent; + } + + public void setUtilizedVirtualCoresPercent(int utilizedVirtualCoresPercent) { + this.utilizedVirtualCoresPercent = utilizedVirtualCoresPercent; + } + public ResourceInfo getTotalClusterResourcesAcrossPartition() { return totalClusterResourcesAcrossPartition; } + + public ResourceInfo getTotalReservedResourcesAcrossPartition() { + return totalReservedResourcesAcrossPartition; + } + + public int getTotalAllocatedContainersAcrossPartition() { + return totalAllocatedContainersAcrossPartition; + } + + public boolean getCrossPartitionMetricsAvailable() { + return crossPartitionMetricsAvailable; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java index f28e5b298ac9d..9804a36099cf5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java @@ -60,6 +60,7 @@ public class FairSchedulerQueueInfo { private ResourceInfo reservedResources; private 
ResourceInfo maxContainerAllocation; + private long pendingContainers; private long allocatedContainers; private long reservedContainers; @@ -108,6 +109,7 @@ public FairSchedulerQueueInfo(FSQueue queue, FairScheduler scheduler) { allocatedContainers = queue.getMetrics().getAllocatedContainers(); reservedContainers = queue.getMetrics().getReservedContainers(); + pendingContainers = queue.getMetrics().getPendingContainers(); if (allocConf.isReservable(queueName) && !allocConf.getShowReservationAsQueues(queueName)) { @@ -122,6 +124,8 @@ public long getAllocatedContainers() { return allocatedContainers; } + public long getPendingContainers() { return pendingContainers; } + public long getReservedContainers() { return reservedContainers; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LeafQueueTemplateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LeafQueueTemplateInfo.java new file mode 100644 index 0000000000000..f528a47449b0e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LeafQueueTemplateInfo.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.ArrayList; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; + +/** + * This class stores the LeafQueue Template configuration. + */ +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class LeafQueueTemplateInfo { + + private ArrayList property = new ArrayList<>(); + + public LeafQueueTemplateInfo() { + } // JAXB needs this + + public LeafQueueTemplateInfo(Configuration conf, String queuePath) { + String configPrefix = CapacitySchedulerConfiguration. 
+ getQueuePrefix(queuePath) + AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX + + DOT; + conf.forEach(entry -> { + if (entry.getKey().startsWith(configPrefix)) { + String name = entry.getKey(); + int start = name.lastIndexOf(AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX + + DOT); + add(new ConfItem(name.substring(start), entry.getValue())); + } + }); + } + + public void add(ConfItem confItem) { + property.add(confItem); + } + + public ArrayList getItems() { + return property; + } + + /** + * This class stores the Configuration Property. + */ + @XmlAccessorType(XmlAccessType.FIELD) + public static class ConfItem { + + private String name; + private String value; + + public ConfItem() { + // JAXB needs this + } + + public ConfItem(String name, String value){ + this.name = name; + this.value = value; + } + + public String getKey() { + return name; + } + + public String getValue() { + return value; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java index f945e0db04e0f..31aca844d7587 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java @@ -30,12 +30,13 @@ import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; @XmlRootElement(name = "node") @XmlAccessorType(XmlAccessType.FIELD) @@ -54,6 +55,8 @@ public class NodeInfo { protected long availMemoryMB; protected long usedVirtualCores; protected long availableVirtualCores; + private float memUtilization; + private float cpuUtilization; private int numRunningOpportContainers; private long usedMemoryOpportGB; private long usedVirtualCoresOpport; @@ -84,6 +87,23 @@ public NodeInfo(RMNode ni, ResourceScheduler sched) { report.getAvailableResource().getVirtualCores(); this.usedResource = new ResourceInfo(report.getUsedResource()); this.availableResource = new ResourceInfo(report.getAvailableResource()); + Resource totalPhysical = ni.getPhysicalResource(); + long nodeMem; + long nodeCores; + if (totalPhysical == null) { + nodeMem = + this.usedMemoryMB + this.availMemoryMB; + // If we don't know the number of physical cores, assume 1. Not + // accurate but better than nothing. + nodeCores = 1; + } else { + nodeMem = totalPhysical.getMemorySize(); + nodeCores = totalPhysical.getVirtualCores(); + } + this.memUtilization = nodeMem <= 0 ? 
0 + : (float)report.getUtilization().getPhysicalMemory() * 100F / nodeMem; + this.cpuUtilization = + (float)report.getUtilization().getCPU() * 100F / nodeCores; } this.id = id.toString(); this.rack = ni.getRackName(); @@ -229,6 +249,22 @@ public ResourceUtilizationInfo getResourceUtilization() { return this.resourceUtilization; } + public float getMemUtilization() { + return memUtilization; + } + + public void setMemUtilization(float util) { + this.memUtilization = util; + } + + public float getVcoreUtilization() { + return cpuUtilization; + } + + public void setVcoreUtilization(float util) { + this.cpuUtilization = util; + } + public String getAllocationTagsSummary() { return this.allocationTags == null ? "" : this.allocationTags.toString(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/AppManagerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/AppManagerTestBase.java index 63b4e44f0e1d3..33fe02824513a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/AppManagerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/AppManagerTestBase.java @@ -18,24 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import static java.util.stream.Collectors.toSet; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -import java.util.List; -import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.mockito.ArgumentCaptor; /** * Base class for AppManager related test. 
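The utilization percentages added above (utilizedMBPercent/utilizedVirtualCoresPercent in ClusterMetricsInfo and memUtilization/cpuUtilization in NodeInfo) come down to guarded integer/float division over metrics the scheduler already reports, with a fallback when a node does not publish its physical resource. A minimal, self-contained sketch of that arithmetic follows; the class and method names are illustrative stand-ins, not code from this patch.

    // Illustrative sketch only: mirrors the divide-by-zero guard and the
    // "physical resource unknown" fallback used by the web DAO changes above.
    public final class NodeUtilizationSketch {

      static float memUtilizationPercent(Long physicalMemMB, long usedMB,
          long availMB, long utilizedPhysicalMemMB) {
        // Fallback: approximate node memory as used + available MB.
        long nodeMem = (physicalMemMB == null) ? usedMB + availMB : physicalMemMB;
        return nodeMem <= 0 ? 0 : (float) utilizedPhysicalMemMB * 100F / nodeMem;
      }

      static float cpuUtilizationPercent(Integer physicalCores, int utilizedCpu) {
        // If the number of physical cores is unknown, assume 1: not accurate,
        // but it keeps the division defined.
        int nodeCores = (physicalCores == null) ? 1 : physicalCores;
        return (float) utilizedCpu * 100F / nodeCores;
      }

      public static void main(String[] args) {
        System.out.println(memUtilizationPercent(8192L, 3072, 5120, 2048)); // 25.0
        System.out.println(memUtilizationPercent(null, 3072, 5120, 4096));  // 50.0
        System.out.println(cpuUtilizationPercent(null, 1));                 // 100.0
      }
    }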
@@ -76,28 +67,6 @@ public int getNumberOfCompletedAppsInStateStore() { return this.completedAppsInStateStore; } - public List getCompletedApps() { - return completedApps; - } - - public Set getFirstNCompletedApps(int n) { - return getCompletedApps().stream().limit(n).collect(toSet()); - } - - public Set getCompletedAppsWithEvenIdsInRange(int n) { - return getCompletedApps().stream().limit(n) - .filter(app -> app.getId() % 2 == 0).collect(toSet()); - } - - public Set getRemovedAppsFromStateStore(int numRemoves) { - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(RMApp.class); - verify(stateStore, times(numRemoves)) - .removeApplication(argumentCaptor.capture()); - return argumentCaptor.getAllValues().stream().map(RMApp::getApplicationId) - .collect(toSet()); - } - public void submitApplication( ApplicationSubmissionContext submissionContext, String user) throws YarnException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java index cf141181262b2..3f2ecff06a1cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterServiceTestBase.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.Event; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockMemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockMemoryRMStateStore.java index ef9606fd27cf6..3d5f3891b07be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockMemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockMemoryRMStateStore.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Test helper for MemoryRMStateStore will make sure the event. 
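The helpers removed in the hunk above verified state-store cleanup by capturing every application handed to removeApplication with Mockito's ArgumentCaptor and comparing the captured set against expectations. A compact illustration of that verification pattern is sketched below, assuming Mockito on the classpath (which this test module already uses); StateStore and App are stand-in types, not classes from this patch.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import java.util.List;
    import org.mockito.ArgumentCaptor;

    // Stand-in types so the sketch compiles on its own.
    interface App { int getId(); }
    interface StateStore { void removeApplication(App app); }

    public class CaptorSketch {
      public static void main(String[] args) {
        StateStore store = mock(StateStore.class);
        App first = mock(App.class);
        App second = mock(App.class);
        store.removeApplication(first);
        store.removeApplication(second);

        // Capture every argument passed to removeApplication and assert the count.
        ArgumentCaptor<App> captor = ArgumentCaptor.forClass(App.class);
        verify(store, times(2)).removeApplication(captor.capture());
        List<App> removed = captor.getAllValues();
        System.out.println(removed.size()); // prints 2
      }
    }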
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java index 3543bc4707ec6..d433753701b25 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -187,6 +190,17 @@ public RegisterNodeManagerResponse registerNode( req.setNodeLabels(nodeLabels); } + NodeStatus status = Records.newRecord(NodeStatus.class); + status.setResponseId(0); + status.setNodeId(nodeId); + status.setContainersStatuses(new ArrayList<>(containerStats.values())); + NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class); + healthStatus.setHealthReport(""); + healthStatus.setIsNodeHealthy(true); + healthStatus.setLastHealthReportTime(1); + status.setNodeHealthStatus(healthStatus); + req.setNodeStatus(status); + RegisterNodeManagerResponse registrationResponse = resourceTracker.registerNodeManager(req); this.currentContainerTokenMasterKey = @@ -364,6 +378,14 @@ public NodeHeartbeatResponse nodeHeartbeat(List updatedStats, return heartbeatResponse; } + public static NodeStatus createMockNodeStatus() { + NodeStatus mockNodeStatus = mock(NodeStatus.class); + NodeHealthStatus mockNodeHealthStatus = mock(NodeHealthStatus.class); + when(mockNodeStatus.getNodeHealthStatus()).thenReturn(mockNodeHealthStatus); + when(mockNodeHealthStatus.getIsNodeHealthy()).thenReturn(true); + return mockNodeStatus; + } + public long getMemory() { return capability.getMemorySize(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 600edfc7f4d22..0de6c572a2c22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -42,8 +42,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Test helper to generate mock nodes @@ -335,6 +335,13 @@ public RMContext getRMContext() { public Resource getPhysicalResource() { return this.physicalResource; } + + @Override + public long calculateHeartBeatInterval( + long defaultInterval, long minInterval, long 
maxInterval, + float speedupFactor, float slowdownFactor) { + return defaultInterval; + } }; private static RMNode buildRMNode(int rack, final Resource perNode, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index b3888c3cd6679..90c554361c03b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.token.Token; @@ -54,6 +56,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; @@ -543,7 +546,9 @@ public MockNM registerNode(String nodeIdStr, Resource nodeCapability) public void sendNodeStarted(MockNM nm) throws Exception { RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get( nm.getNodeId()); - node.handle(new RMNodeStartedEvent(nm.getNodeId(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node.handle(new RMNodeStartedEvent(nm.getNodeId(), null, null, + mockNodeStatus)); drainEventsImplicitly(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index 1e4b050816138..06c4527e5ba73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -98,7 +98,7 @@ public class NodeManager implements ContainerManagementProtocol { public NodeManager(String hostName, int containerManagerPort, int httpPort, String rackName, Resource capability, - ResourceManager resourceManager) + ResourceManager resourceManager, NodeStatus nodestatus) throws IOException, YarnException { this.containerManagerAddress = hostName + ":" + containerManagerPort; this.nodeHttpAddress = hostName + ":" + httpPort; @@ -113,6 +113,7 @@ public NodeManager(String hostName, int containerManagerPort, int httpPort, request.setResource(capability); request.setNodeId(this.nodeId); request.setNMVersion(YarnVersionInfo.getVersion()); 
+ request.setNodeStatus(nodestatus); resourceTrackerService.registerNodeManager(request); this.resourceManager = resourceManager; resourceManager.getResourceScheduler().getNodeReport(this.nodeId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 3e749003c699e..e8b4105e9b562 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -19,10 +19,13 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -81,15 +84,20 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; +import org.mockito.ArgumentCaptor; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.io.IOException; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -100,6 +108,7 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; @@ -118,12 +127,16 @@ */ public class TestAppManager extends AppManagerTestBase{ + @Rule + public UseCapacitySchedulerRule shouldUseCs = new UseCapacitySchedulerRule(); + private static final Logger LOG = LoggerFactory.getLogger(TestAppManager.class); private static RMAppEventType appEventType = RMAppEventType.KILL; private static String USER = "user_"; private static String USER0 = USER + 0; + private ResourceScheduler scheduler; private static final String USER_ID_PREFIX = "userid="; @@ -143,52 +156,12 @@ private static List newRMApps(int n, long time, RMAppState state) { return list; } - private static List newRMAppsMixedLogAggregationStatus(int n, - long time, RMAppState state) { - List list = Lists.newArrayList(); - for (int i = 0; i < n; ++i) { - MockRMApp rmApp = new MockRMApp(i, time, state); - rmApp.setLogAggregationEnabled(true); - 
rmApp.setLogAggregationFinished(i % 2 == 0); - list.add(rmApp); - } - return list; - } - public RMContext mockRMContext(int n, long time) { - final ConcurrentMap map = createRMAppsMap(n, time); - return createMockRMContextInternal(map); - } - - public RMContext mockRMContextWithMixedLogAggregationStatus(int n, - long time) { - final ConcurrentMap map = - createRMAppsMapMixedLogAggStatus(n, time); - return createMockRMContextInternal(map); - } - - private ConcurrentMap createRMAppsMap(int n, - long time) { final List apps = newRMApps(n, time, RMAppState.FINISHED); final ConcurrentMap map = Maps.newConcurrentMap(); for (RMApp app : apps) { map.put(app.getApplicationId(), app); } - return map; - } - - private ConcurrentMap createRMAppsMapMixedLogAggStatus( - int n, long time) { - final List apps = - newRMAppsMixedLogAggregationStatus(n, time, RMAppState.FINISHED); - final ConcurrentMap map = Maps.newConcurrentMap(); - for (RMApp app : apps) { - map.put(app.getApplicationId(), app); - } - return map; - } - - private RMContext createMockRMContextInternal(ConcurrentMap map) { Dispatcher rmDispatcher = new AsyncDispatcher(); ContainerAllocationExpirer containerAllocationExpirer = new ContainerAllocationExpirer( rmDispatcher); @@ -240,12 +213,8 @@ public void handle(RMAppEvent event) { } } - private void addToCompletedApps(TestRMAppManager appMonitor, - RMContext rmContext) { - // ensure applications are finished in order by their IDs - List sortedApps = new ArrayList<>(rmContext.getRMApps().values()); - sortedApps.sort(Comparator.comparingInt(o -> o.getApplicationId().getId())); - for (RMApp app : sortedApps) { + protected void addToCompletedApps(TestRMAppManager appMonitor, RMContext rmContext) { + for (RMApp app : rmContext.getRMApps().values()) { if (app.getState() == RMAppState.FINISHED || app.getState() == RMAppState.KILLED || app.getState() == RMAppState.FAILED) { @@ -269,7 +238,13 @@ public void setUp() throws IOException { rmContext = mockRMContext(1, now - 10); rmContext .setRMTimelineCollectorManager(mock(RMTimelineCollectorManager.class)); - ResourceScheduler scheduler = mockResourceScheduler(); + + if (shouldUseCs.useCapacityScheduler()) { + scheduler = mockResourceScheduler(CapacityScheduler.class); + } else { + scheduler = mockResourceScheduler(); + } + ((RMContextImpl)rmContext).setScheduler(scheduler); Configuration conf = new Configuration(); @@ -654,32 +629,18 @@ public void testStateStoreAppLimitLessThanMemoryAppLimit() { addToCompletedApps(appMonitor, rmContext); Assert.assertEquals("Number of completed apps incorrect", allApps, appMonitor.getCompletedAppsListSize()); - - int numRemoveAppsFromStateStore = allApps - maxAppsInStateStore; - Set appsShouldBeRemovedFromStateStore = appMonitor - .getFirstNCompletedApps(numRemoveAppsFromStateStore); appMonitor.checkAppNumCompletedLimit(); - Set removedAppsFromStateStore = appMonitor - .getRemovedAppsFromStateStore(numRemoveAppsFromStateStore); - Assert.assertEquals("Number of apps incorrect after # completed check", maxAppsInMemory, rmContext.getRMApps().size()); Assert.assertEquals("Number of completed apps incorrect after check", maxAppsInMemory, appMonitor.getCompletedAppsListSize()); + int numRemoveAppsFromStateStore = 10 - maxAppsInStateStore; verify(rmContext.getStateStore(), times(numRemoveAppsFromStateStore)) .removeApplication(isA(RMApp.class)); Assert.assertEquals(maxAppsInStateStore, appMonitor.getNumberOfCompletedAppsInStateStore()); - - List completedApps = appMonitor.getCompletedApps(); - 
Assert.assertEquals(maxAppsInMemory, completedApps.size()); - Assert.assertEquals(numRemoveAppsFromStateStore, - removedAppsFromStateStore.size()); - Assert.assertEquals(numRemoveAppsFromStateStore, - Sets.intersection(appsShouldBeRemovedFromStateStore, - removedAppsFromStateStore).size()); } @Test @@ -697,12 +658,9 @@ public void testStateStoreAppLimitGreaterThanMemoryAppLimit() { addToCompletedApps(appMonitor, rmContext); Assert.assertEquals("Number of completed apps incorrect", allApps, appMonitor.getCompletedAppsListSize()); - - int numRemoveApps = allApps - maxAppsInMemory; - Set appsShouldBeRemoved = appMonitor - .getFirstNCompletedApps(numRemoveApps); appMonitor.checkAppNumCompletedLimit(); + int numRemoveApps = allApps - maxAppsInMemory; Assert.assertEquals("Number of apps incorrect after # completed check", maxAppsInMemory, rmContext.getRMApps().size()); Assert.assertEquals("Number of completed apps incorrect after check", @@ -711,56 +669,6 @@ public void testStateStoreAppLimitGreaterThanMemoryAppLimit() { isA(RMApp.class)); Assert.assertEquals(maxAppsInMemory, appMonitor.getNumberOfCompletedAppsInStateStore()); - - List completedApps = appMonitor.getCompletedApps(); - Assert.assertEquals(maxAppsInMemory, completedApps.size()); - Assert.assertEquals(numRemoveApps, appsShouldBeRemoved.size()); - assertTrue(Collections.disjoint(completedApps, appsShouldBeRemoved)); - } - - @Test - public void testStateStoreAppLimitSomeAppsHaveNotFinishedLogAggregation() { - long now = System.currentTimeMillis(); - final int allApps = 10; - RMContext rmContext = - mockRMContextWithMixedLogAggregationStatus(allApps, now - 20000); - Configuration conf = new YarnConfiguration(); - int maxAppsInMemory = 2; - conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, - maxAppsInMemory); - // greater than maxCompletedAppsInMemory, reset to - // RM_MAX_COMPLETED_APPLICATIONS. 
- conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, - 1000); - TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf); - - addToCompletedApps(appMonitor, rmContext); - Assert.assertEquals("Number of completed apps incorrect", allApps, - appMonitor.getCompletedAppsListSize()); - - int numRemoveApps = allApps - maxAppsInMemory; - int effectiveNumRemoveApps = numRemoveApps / 2; - //only apps with even ID would be deleted due to log aggregation status - int expectedNumberOfAppsInMemory = maxAppsInMemory + effectiveNumRemoveApps; - - Set appsShouldBeRemoved = appMonitor - .getCompletedAppsWithEvenIdsInRange(numRemoveApps); - appMonitor.checkAppNumCompletedLimit(); - - Assert.assertEquals("Number of apps incorrect after # completed check", - expectedNumberOfAppsInMemory, rmContext.getRMApps().size()); - Assert.assertEquals("Number of completed apps incorrect after check", - expectedNumberOfAppsInMemory, appMonitor.getCompletedAppsListSize()); - verify(rmContext.getStateStore(), times(effectiveNumRemoveApps)) - .removeApplication(isA(RMApp.class)); - Assert.assertEquals(expectedNumberOfAppsInMemory, - appMonitor.getNumberOfCompletedAppsInStateStore()); - - List completedApps = appMonitor.getCompletedApps(); - - Assert.assertEquals(expectedNumberOfAppsInMemory, completedApps.size()); - Assert.assertEquals(effectiveNumRemoveApps, appsShouldBeRemoved.size()); - assertTrue(Collections.disjoint(completedApps, appsShouldBeRemoved)); } protected void setupDispatcher(RMContext rmContext, Configuration conf) { @@ -980,17 +888,20 @@ public void testRMAppSubmitWithInvalidTokens() throws Exception { @Test (timeout = 30000) public void testRMAppSubmitMaxAppAttempts() throws Exception { int[] globalMaxAppAttempts = new int[] { 10, 1 }; + int[] rmAmMaxAttempts = new int[] { 8, 1 }; int[][] individualMaxAppAttempts = new int[][]{ new int[]{ 9, 10, 11, 0 }, new int[]{ 1, 10, 0, -1 }}; int[][] expectedNums = new int[][]{ - new int[]{ 9, 10, 10, 10 }, + new int[]{ 9, 10, 10, 8 }, new int[]{ 1, 1, 1, 1 }}; for (int i = 0; i < globalMaxAppAttempts.length; ++i) { for (int j = 0; j < individualMaxAppAttempts.length; ++j) { - ResourceScheduler scheduler = mockResourceScheduler(); + scheduler = mockResourceScheduler(); Configuration conf = new Configuration(); - conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, globalMaxAppAttempts[i]); + conf.setInt(YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS, + globalMaxAppAttempts[i]); + conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, rmAmMaxAttempts[i]); ApplicationMasterService masterService = new ApplicationMasterService(rmContext, scheduler); TestRMAppManager appMonitor = new TestRMAppManager(rmContext, @@ -1081,6 +992,17 @@ public void testEscapeApplicationSummary() { when(app.getSubmitTime()).thenReturn(1000L); when(app.getLaunchTime()).thenReturn(2000L); when(app.getApplicationTags()).thenReturn(Sets.newHashSet("tag2", "tag1")); + + RMAppAttempt mockRMAppAttempt = mock(RMAppAttempt.class); + Container mockContainer = mock(Container.class); + NodeId mockNodeId = mock(NodeId.class); + String host = "127.0.0.1"; + + when(mockNodeId.getHost()).thenReturn(host); + when(mockContainer.getNodeId()).thenReturn(mockNodeId); + when(mockRMAppAttempt.getMasterContainer()).thenReturn(mockContainer); + when(app.getCurrentAppAttempt()).thenReturn(mockRMAppAttempt); + Map resourceSecondsMap = new HashMap<>(); resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 16384L); resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 64L); @@ -1102,6 
+1024,7 @@ public void testEscapeApplicationSummary() { assertTrue(msg.contains("Multiline" + escaped +"AppName")); assertTrue(msg.contains("Multiline" + escaped +"UserName")); assertTrue(msg.contains("Multiline" + escaped +"QueueName")); + assertTrue(msg.contains("appMasterHost=" + host)); assertTrue(msg.contains("submitTime=1000")); assertTrue(msg.contains("launchTime=2000")); assertTrue(msg.contains("memorySeconds=16384")); @@ -1155,7 +1078,12 @@ public ApplicationPlacementContext answer(InvocationOnMock invocation) } private static ResourceScheduler mockResourceScheduler() { - ResourceScheduler scheduler = mock(ResourceScheduler.class); + return mockResourceScheduler(ResourceScheduler.class); + } + + private static ResourceScheduler + mockResourceScheduler(Class schedulerClass) { + ResourceScheduler scheduler = mock(schedulerClass); when(scheduler.getMinimumResourceCapability()).thenReturn( Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB)); @@ -1393,6 +1321,51 @@ public void testGetUserNameForPlacementNoRuleDefined() Assert.assertEquals(expectedUser, userNameForPlacement); } + @Test + @UseMockCapacityScheduler + public void testCheckAccessFullPathWithCapacityScheduler() + throws YarnException { + // make sure we only combine "parent + queue" if CS is selected + testCheckAccess("root.users", "hadoop"); + } + + @Test + @UseMockCapacityScheduler + public void testCheckAccessLeafQueueOnlyWithCapacityScheduler() + throws YarnException { + // make sure we that NPE is avoided if there's no parent defined + testCheckAccess(null, "hadoop"); + } + + private void testCheckAccess(String parent, String queue) + throws YarnException { + enableApplicationTagPlacement(true, "hadoop"); + String userIdTag = USER_ID_PREFIX + "hadoop"; + setApplicationTags("tag1", userIdTag, "tag2"); + PlacementManager placementMgr = mock(PlacementManager.class); + ApplicationPlacementContext appContext; + String expectedQueue; + if (parent == null) { + appContext = new ApplicationPlacementContext(queue); + expectedQueue = queue; + } else { + appContext = new ApplicationPlacementContext(queue, parent); + expectedQueue = parent + "." + queue; + } + + when(placementMgr.placeApplication(asContext, "hadoop")) + .thenReturn(appContext); + appMonitor.getUserNameForPlacement("hadoop", asContext, placementMgr); + + ArgumentCaptor queueNameCaptor = + ArgumentCaptor.forClass(String.class); + verify(scheduler).checkAccess(any(UserGroupInformation.class), + any(QueueACL.class), queueNameCaptor.capture()); + + assertEquals("Expected access check for queue", + expectedQueue, queueNameCaptor.getValue()); + } + private void enableApplicationTagPlacement(boolean userHasAccessToQueue, String... whiteListedUsers) { Configuration conf = new Configuration(); @@ -1401,7 +1374,6 @@ private void enableApplicationTagPlacement(boolean userHasAccessToQueue, conf.setStrings(YarnConfiguration .APPLICATION_TAG_BASED_PLACEMENT_USER_WHITELIST, whiteListedUsers); ((RMContextImpl) rmContext).setYarnConfiguration(conf); - ResourceScheduler scheduler = mockResourceScheduler(); when(scheduler.checkAccess(any(UserGroupInformation.class), eq(QueueACL.SUBMIT_APPLICATIONS), any(String.class))) .thenReturn(userHasAccessToQueue); @@ -1432,4 +1404,24 @@ private void setApplicationTags(String... 
tags) { Collections.addAll(applicationTags, tags); asContext.setApplicationTags(applicationTags); } + + private class UseCapacitySchedulerRule extends TestWatcher { + private boolean useCapacityScheduler; + + @Override + protected void starting(Description d) { + useCapacityScheduler = + d.getAnnotation(UseMockCapacityScheduler.class) != null; + } + + public boolean useCapacityScheduler() { + return useCapacityScheduler; + } + } + + @Retention(RetentionPolicy.RUNTIME) + public @interface UseMockCapacityScheduler { + // mark test cases with this which require + // the scheduler type to be CapacityScheduler + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java index 3cf809d7f3ea9..355f28877b7d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java @@ -92,7 +92,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; @@ -453,6 +453,41 @@ public void testSetupTokensWithHTTPS() throws Exception { testSetupTokens(true, conf); } + @Test + public void testAMMasterContainerHost() throws Exception { + //Test that masterContainer and its associated host are + //set before the AM is even launched. 
+ MockRM rm = new MockRM(); + rm.start(); + String host = "127.0.0.1"; + String port = "1234"; + MockNM nm1 = rm.registerNode(host + ":" + port, 5120); + RMApp app = MockRMAppSubmitter.submitWithMemory(2000, rm); + // kick the scheduling + nm1.nodeHeartbeat(true); + RMAppAttempt attempt = app.getCurrentAppAttempt(); + + try { + GenericTestUtils.waitFor(new Supplier() { + @Override public Boolean get() { + return attempt.getMasterContainer() != null; + } + }, 10, 200 * 100); + } catch (TimeoutException e) { + fail("timed out while waiting for AM Launch to happen."); + } + + Assert.assertEquals( + app.getCurrentAppAttempt().getMasterContainer().getNodeId().getHost(), + host); + + //send kill before launch + rm.killApp(app.getApplicationId()); + rm.waitForState(app.getApplicationId(), RMAppState.KILLED); + + rm.stop(); + } + private void testSetupTokens(boolean https, YarnConfiguration conf) throws Exception { MockRM rm = new MockRM(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterServiceFair.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterServiceFair.java index 5f6f9802e300b..6b268381fd4a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterServiceFair.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterServiceFair.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 82695ed5c4808..a4629017a53f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -187,8 +187,8 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestClientRMService { @@ -593,6 +593,51 @@ public void testForceKillNonExistingApplication() throws YarnException { } } + @Test + public void testApplicationTagsValidation() throws IOException { + YarnConfiguration conf = new YarnConfiguration(); + int maxtags = 3, appMaxTagLength = 5; + 
conf.setInt(YarnConfiguration.RM_APPLICATION_MAX_TAGS, maxtags); + conf.setInt(YarnConfiguration.RM_APPLICATION_MAX_TAG_LENGTH, + appMaxTagLength); + MockRM rm = new MockRM(conf); + rm.init(conf); + rm.start(); + + ClientRMService rmService = rm.getClientRMService(); + + List tags = Arrays.asList("Tag1", "Tag2", "Tag3", "Tag4"); + validateApplicationTag(rmService, tags, + "Too many applicationTags, a maximum of only " + maxtags + + " are allowed!"); + + tags = Arrays.asList("ApplicationTag1", "ApplicationTag2", + "ApplicationTag3"); + // tags are converted to lowercase in + // ApplicationSubmissionContext#setApplicationTags + validateApplicationTag(rmService, tags, + "Tag applicationtag1 is too long, maximum allowed length of a tag is " + + appMaxTagLength); + + tags = Arrays.asList("tãg1", "tag2#"); + validateApplicationTag(rmService, tags, + "A tag can only have ASCII characters! Invalid tag - tãg1"); + rm.close(); + } + + private void validateApplicationTag(ClientRMService rmService, + List tags, String errorMsg) { + SubmitApplicationRequest submitRequest = mockSubmitAppRequest( + getApplicationId(101), MockApps.newAppName(), QUEUE_1, + new HashSet(tags)); + try { + rmService.submitApplication(submitRequest); + Assert.fail(); + } catch (Exception ex) { + Assert.assertTrue(ex.getMessage().contains(errorMsg)); + } + } + @Test public void testForceKillApplication() throws Exception { YarnConfiguration conf = new YarnConfiguration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java index 1a81b640925d9..a5762680942d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.curator.CuratorZookeeperClient; import org.apache.curator.test.InstanceSpec; import org.apache.curator.test.KillSession; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java index 5c5cb25fd6225..901dc8a143020 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import 
org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; @@ -1045,7 +1045,7 @@ public ResourceScheduler getScheduler() { // Verify that the OpportunisticContainerAllocatorAMSercvice can handle // vanilla ApplicationMasterProtocol clients RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); ApplicationMasterProtocolPB ampProxy = RPC.getProxy(ApplicationMasterProtocolPB .class, 1, NetUtils.getConnectAddress(server), conf); @@ -1080,7 +1080,7 @@ public ResourceScheduler getScheduler() { // Verify that the DistrubutedSchedulingService can handle the // DistributedSchedulingAMProtocol clients as well RPC.setProtocolEngine(conf, DistributedSchedulingAMProtocolPB.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); DistributedSchedulingAMProtocolPB dsProxy = RPC.getProxy(DistributedSchedulingAMProtocolPB .class, 1, NetUtils.getConnectAddress(server), conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java index 2078d327756ff..a3bdf3945ad06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java index e235632f75cf9..699d975eff09f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java @@ -97,9 +97,9 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java index 282ff11152265..e8a532d1baf29 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java @@ -31,7 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ipc.ClientId; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.TestRPC.TestImpl; @@ -420,7 +420,7 @@ public TestProtos.EmptyResponseProto ping( public void testRMAuditLoggerWithIP() throws Exception { Configuration conf = new Configuration(); RPC.setProtocolEngine(conf, TestRpcService.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); // Create server side implementation MyTestRPCServer serverImpl = new MyTestRPCServer(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java index b44a8aed8157e..83d81ec9ebb0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java index 30f52acfc8c7a..5d6638ce13515 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java @@ -33,7 +33,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestRMHAForNodeLabels extends RMHATestBase { public static 
final Logger LOG = LoggerFactory diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 1f1e164cf5b04..dad27839cf6c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -44,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceOption; +import org.apache.hadoop.yarn.api.records.ResourceUtilization; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.InlineDispatcher; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; @@ -216,8 +220,9 @@ private RMNodeStatusEvent getMockRMNodeStatusEventWithoutRunningApps() { @Test (timeout = 5000) public void testExpiredContainer() { + NodeStatus mockNodeStatus = createMockNodeStatus(); // Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); verify(scheduler).handle(any(NodeAddedSchedulerEvent.class)); // Expire a container @@ -280,12 +285,13 @@ public void testRecommissionNode() { @Test (timeout = 5000) public void testContainerUpdate() throws InterruptedException{ + NodeStatus mockNodeStatus = createMockNodeStatus(); //Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); NodeId nodeId = BuilderUtils.newNodeId("localhost:1", 1); RMNodeImpl node2 = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null); - node2.handle(new RMNodeStartedEvent(null, null, null)); + node2.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); ApplicationId app0 = BuilderUtils.newApplicationId(0, 0); ApplicationId app1 = BuilderUtils.newApplicationId(1, 1); @@ -341,8 +347,9 @@ public void testContainerUpdate() throws InterruptedException{ @Test (timeout = 5000) public void testStatusChange(){ + NodeStatus mockNodeStatus = createMockNodeStatus(); //Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); //Add info to the queue first node.setNextHeartBeat(false); @@ -607,6 +614,33 @@ public void testUnhealthyRebooting() { Assert.assertEquals(NodeState.REBOOTED, node.getState()); } + @Test + public void testAddUnhealthyNode() { + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialActive = cm.getNumActiveNMs(); + int 
initialLost = cm.getNumLostNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); + + NodeHealthStatus status = NodeHealthStatus.newInstance(false, "sick", + System.currentTimeMillis()); + NodeStatus nodeStatus = NodeStatus.newInstance(node.getNodeID(), 0, + new ArrayList<>(), null, status, null, null, null); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + nodeStatus)); + + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy + 1, cm.getUnhealthyNMs()); + Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); + Assert.assertEquals(NodeState.UNHEALTHY, node.getState()); + } + @Test public void testNMShutdown() { RMNodeImpl node = getRunningNode(); @@ -712,7 +746,9 @@ private RMNodeImpl getRunningNode(String nmVersion, int port) { Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, capability, nmVersion); - node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); Assert.assertEquals(NodeState.RUNNING, node.getState()); return node; } @@ -763,7 +799,10 @@ private RMNodeImpl getRebootedNode() { Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, null, capability, null); - node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); Assert.assertEquals(NodeState.RUNNING, node.getState()); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING)); Assert.assertEquals(NodeState.REBOOTED, node.getState()); @@ -779,7 +818,9 @@ public void testAdd() { int initialUnhealthy = cm.getUnhealthyNMs(); int initialDecommissioned = cm.getNumDecommisionedNMs(); int initialRebooted = cm.getNumRebootedNMs(); - node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); Assert.assertEquals("Active Nodes", initialActive + 1, cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes", @@ -1024,10 +1065,13 @@ public void testResourceUpdateOnRecommissioningNode() { Resource oldCapacity = node.getTotalCapability(); assertEquals("Memory resource is not match.", oldCapacity.getMemorySize(), 4096); assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4); + assertFalse("updatedCapability should be false.", + node.isUpdatedCapability()); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.RECOMMISSION)); Resource originalCapacity = node.getOriginalTotalCapability(); assertEquals("Original total capability not null after recommission", null, originalCapacity); + assertTrue("updatedCapability should be set.", node.isUpdatedCapability()); } @Test @@ -1052,6 +1096,12 @@ public void testDisappearingContainer() { node.getLaunchedContainers().contains(cid1)); Assert.assertTrue("second 
container not running", node.getLaunchedContainers().contains(cid2)); + assertEquals("unexpected number of running containers", + 2, node.getUpdatedExistContainers().size()); + Assert.assertTrue("first container not running", + node.getUpdatedExistContainers().containsKey(cid1)); + Assert.assertTrue("second container not running", + node.getUpdatedExistContainers().containsKey(cid2)); assertEquals("already completed containers", 0, completedContainers.size()); containerStats.remove(0); @@ -1071,12 +1121,17 @@ public void testDisappearingContainer() { 1, node.getLaunchedContainers().size()); Assert.assertTrue("second container not running", node.getLaunchedContainers().contains(cid2)); + assertEquals("unexpected number of running containers", + 1, node.getUpdatedExistContainers().size()); + Assert.assertTrue("second container not running", + node.getUpdatedExistContainers().containsKey(cid2)); } @Test public void testForHandlingDuplicatedCompltedContainers() { + NodeStatus mockNodeStatus = createMockNodeStatus(); // Start the node - node.handle(new RMNodeStartedEvent(null, null, null)); + node.handle(new RMNodeStartedEvent(null, null, null, mockNodeStatus)); // Add info to the queue first node.setNextHeartBeat(false); @@ -1123,4 +1178,112 @@ public void testFinishedContainersPulledByAMOnNewNode() { Assert.assertEquals(1, rmNode.getContainersToBeRemovedFromNM().size()); } + + private void calcIntervalTest(RMNodeImpl rmNode, ResourceUtilization nodeUtil, + long hbDefault, long hbMin, long hbMax, float speedup, float slowdown, + float cpuUtil, long expectedHb) { + nodeUtil.setCPU(cpuUtil); + rmNode.setNodeUtilization(nodeUtil); + long hbInterval = rmNode.calculateHeartBeatInterval(hbDefault, hbMin, hbMax, + speedup, slowdown); + assertEquals("heartbeat interval incorrect", expectedHb, hbInterval); + } + + @Test + public void testCalculateHeartBeatInterval() { + RMNodeImpl rmNode = getRunningNode(); + Resource nodeCapability = rmNode.getTotalCapability(); + ClusterMetrics metrics = ClusterMetrics.getMetrics(); + // Set cluster capability to 10 * nodeCapability + int vcoreUnit = nodeCapability.getVirtualCores(); + rmNode.setPhysicalResource(nodeCapability); + int clusterVcores = vcoreUnit * 10; + metrics.incrCapability( + Resource.newInstance(10 * nodeCapability.getMemorySize(), + clusterVcores)); + + long hbDefault = 2000; + long hbMin = 1500; + long hbMax = 2500; + float speedup = 1.0F; + float slowdown = 1.0F; + metrics.incrUtilizedVirtualCores(vcoreUnit * 5); // 50 % cluster util + ResourceUtilization nodeUtil = ResourceUtilization.newInstance( + 1024, vcoreUnit, 0.0F * vcoreUnit); // 0% rmNode util + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.0F, hbMin); // 0% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.10F, hbMin); // 10% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.20F, hbMin); // 20% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.30F, 1600); // 30% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.40F, 1800); // 40% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.50F, hbDefault); // 50% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.60F, 2200); // 60% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, 
slowdown, vcoreUnit * 0.70F, 2400); // 70% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.80F, hbMax); // 80% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.90F, hbMax); // 90% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 1.0F, hbMax); // 100% + + // Try with 50% speedup/slowdown factors + speedup = 0.5F; + slowdown = 0.5F; + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.0F, hbMin); // 0% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.10F, 1600); // 10% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.20F, 1700); // 20% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.30F, 1800); // 30% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.40F, 1900); // 40% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.50F, hbDefault); // 50% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.60F, 2100); // 60% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.70F, 2200); // 70% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.80F, 2300); // 80% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.90F, 2400); // 90% + + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 1.0F, hbMax); // 100% + + // With Physical Resource null, it should always return default + rmNode.setPhysicalResource(null); + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 0.1F, hbDefault); // 10% + calcIntervalTest(rmNode, nodeUtil, hbDefault, hbMin, hbMax, + speedup, slowdown, vcoreUnit * 1.0F, hbDefault); // 100% + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index 3b6d06831ae69..fa6cf3dbad4b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -134,9 +134,9 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestRMRestart extends ParameterizedSchedulerTestBase { private static final Logger LOG = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index b9ff5882530bd..1cb5e1d0e7633 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; @@ -37,6 +38,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; @@ -88,12 +90,12 @@ public void tearDown() throws Exception { private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode(String hostName, int containerManagerPort, int httpPort, - String rackName, Resource capability) throws IOException, - YarnException { + String rackName, Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm = new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( hostName, containerManagerPort, httpPort, rackName, capability, - resourceManager); + resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext() .getRMNodes().get(nm.getNodeId())); @@ -109,26 +111,30 @@ public void testResourceAllocation() final int memory = 4 * 1024; final int vcores = 4; - + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host1 = "host1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 = registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory, vcores)); + Resources.createResource(memory, vcores), mockNodeStatus); // Register node2 String host2 = "host2"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm2 = registerNode(host2, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory/2, vcores/2)); + Resources.createResource(memory/2, vcores/2), mockNodeStatus); // nodes should be in RUNNING state RMNodeImpl node1 = (RMNodeImpl) resourceManager.getRMContext().getRMNodes().get( nm1.getNodeId()); RMNodeImpl node2 = (RMNodeImpl) resourceManager.getRMContext().getRMNodes().get( nm2.getNodeId()); - node1.handle(new RMNodeStartedEvent(nm1.getNodeId(), null, null)); - node2.handle(new RMNodeStartedEvent(nm2.getNodeId(), null, null)); + node1.handle(new RMNodeStartedEvent(nm1.getNodeId(), null, null, + mockNodeStatus)); + node2.handle(new RMNodeStartedEvent(nm2.getNodeId(), null, null, + mockNodeStatus)); // Submit an application Application application = new Application("user1", resourceManager); @@ -216,9 +222,12 @@ private void nodeUpdate( public void testNodeHealthReportIsNotNull() throws Exception{ String host1 = "host1"; final 
int memory = 4 * 1024; + + NodeStatus mockNodeStatus = createMockNodeStatus(); + org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 = - registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory, 1)); + registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(memory, 1), mockNodeStatus); nm1.heartbeat(); nm1.heartbeat(); Collection values = resourceManager.getRMContext().getRMNodes().values(); @@ -237,7 +246,7 @@ private void checkResourceUsage( @Test (timeout = 30000) public void testResourceManagerInitConfigValidation() throws Exception { Configuration conf = new YarnConfiguration(); - conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, -1); + conf.setInt(YarnConfiguration.GLOBAL_RM_AM_MAX_ATTEMPTS, -1); try { resourceManager = new MockRM(conf); fail("Exception is expected because the global max attempts" + @@ -247,6 +256,17 @@ public void testResourceManagerInitConfigValidation() throws Exception { if (!e.getMessage().startsWith( "Invalid global max attempts configuration")) throw e; } + Configuration yarnConf = new YarnConfiguration(); + yarnConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, -1); + try { + resourceManager = new MockRM(yarnConf); + fail("Exception is expected because AM max attempts" + + " is negative."); + } catch (YarnRuntimeException e) { + // Exception is expected. + if (!e.getMessage().startsWith( + "Invalid rm am max attempts configuration")) throw e; + } } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index 6690339d892b6..5e3e67e6eca06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.NodeEventDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.FileSystemNodeAttributeStore; + +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -2712,10 +2714,14 @@ protected Dispatcher createDispatcher() { RegisterNodeManagerRequest.class); NodeId nodeId = NodeId.newInstance("host2", 1234); Resource capability = BuilderUtils.newResource(1024, 1); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + req.setResource(capability); req.setNodeId(nodeId); 
req.setHttpPort(1234); req.setNMVersion(YarnVersionInfo.getVersion()); + req.setNodeStatus(mockNodeStatus); ContainerId c1 = ContainerId.newContainerId(appAttemptId, 1); ContainerId c2 = ContainerId.newContainerId(appAttemptId, 2); ContainerId c3 = ContainerId.newContainerId(appAttemptId, 3); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index af94859aa4bf0..da7f26bea3f67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index a09aa006713e5..f8f0d34b81d82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -56,7 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.util.Records; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; @InterfaceAudience.Private public abstract class MockAsm extends MockApps { @@ -146,17 +146,6 @@ public String getOriginalTrackingUrl() { public int getMaxAppAttempts() { throw new UnsupportedOperationException("Not supported yet."); } - - @Override - public boolean isLogAggregationEnabled() { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public boolean isLogAggregationFinished() { - throw new UnsupportedOperationException("Not supported yet."); - } - @Override public ApplicationReport createAndGetApplicationReport( String clientUserName,boolean allowAccess) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java index a14130f9fd0af..c4291b6040bf7 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; @@ -91,6 +92,12 @@ private void syncNodeGracefulDecommission( rm.drainEvents(); } + private void syncNodeRecommissioning(MockNM nm) throws Exception { + rm.sendNodeEvent(nm, RMNodeEventType.RECOMMISSION); + rm.waitForState(nm.getNodeId(), NodeState.RUNNING); + rm.drainEvents(); + } + private AllocateResponse allocate(final ApplicationAttemptId attemptId, final AllocateRequest req) throws Exception { UserGroupInformation ugi = @@ -140,6 +147,53 @@ public void testAMRMDecommissioningNodes() throws Exception { NodeUpdateType.NODE_DECOMMISSIONING, nr.getNodeUpdateType()); } + @Test + public void testAMRMRecommissioningNodes() throws Exception { + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10000); + MockNM nm2 = rm.registerNode("127.0.0.2:1234", 10000); + rm.drainEvents(); + + RMApp app1 = MockRMAppSubmitter.submitWithMemory(2000, rm); + + // Trigger the scheduling so the AM gets 'launched' on nm1 + nm1.nodeHeartbeat(true); + + RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); + MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); + + // register AM returns no unusable node + am1.registerAppAttempt(); + + // DECOMMISSION nm2 + Integer decommissioningTimeout = 600; + syncNodeGracefulDecommission(nm2, decommissioningTimeout); + + AllocateRequest allocateRequest1 = + AllocateRequest.newInstance(0, 0F, null, null, null); + AllocateResponse response1 = + allocate(attempt1.getAppAttemptId(), allocateRequest1); + List updatedNodes = response1.getUpdatedNodes(); + Assert.assertEquals(1, updatedNodes.size()); + NodeReport nr = updatedNodes.iterator().next(); + Assert.assertEquals( + decommissioningTimeout, nr.getDecommissioningTimeout()); + Assert.assertEquals( + NodeUpdateType.NODE_DECOMMISSIONING, nr.getNodeUpdateType()); + + // Wait for nm2 to RECOMMISSION + syncNodeRecommissioning(nm2); + + AllocateRequest allocateRequest2 = AllocateRequest + .newInstance(response1.getResponseId(), 0F, null, null, null); + AllocateResponse response2 = + allocate(attempt1.getAppAttemptId(), allocateRequest2); + List updatedNodes2 = response2.getUpdatedNodes(); + Assert.assertEquals(1, updatedNodes2.size()); + NodeReport nr2 = updatedNodes2.iterator().next(); + Assert.assertEquals( + NodeUpdateType.NODE_USABLE, nr2.getNodeUpdateType()); + } + @Test public void testAMRMUnusableNodes() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java index c61b8dd9b16c7..46311e2ab9d75 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java @@ -25,8 +25,8 @@ import java.util.List; import java.util.Map; -import com.google.common.base.Throwables; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java index 8d31fe1a8ba83..6836288ed1cd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.logaggregationstatus; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -139,13 +140,15 @@ public void testLogAggregationStatus() throws Exception { Resource capability = Resource.newInstance(4096, 4); RMNodeImpl node1 = new RMNodeImpl(nodeId1, rmContext, null, 0, 0, null, capability, null); - node1.handle(new RMNodeStartedEvent(nodeId1, null, null)); + NodeStatus mockNodeStatus = createMockNodeStatus(); + node1.handle(new RMNodeStartedEvent(nodeId1, null, null, mockNodeStatus)); rmApp.handle(new RMAppRunningOnNodeEvent(this.appId, nodeId1)); NodeId nodeId2 = NodeId.newInstance("localhost", 2345); RMNodeImpl node2 = new RMNodeImpl(nodeId2, rmContext, null, 0, 0, null, capability, null); - node2.handle(new RMNodeStartedEvent(node2.getNodeID(), null, null)); + node2.handle(new RMNodeStartedEvent(node2.getNodeID(), null, null, + mockNodeStatus)); rmApp.handle(new RMAppRunningOnNodeEvent(this.appId, nodeId2)); // The initial log aggregation status for these two nodes diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java deleted file mode 100644 index 87f2f1f3c9690..0000000000000 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ /dev/null @@ -1,985 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.Container; -import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ResourceInformation; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.event.Dispatcher; -import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; -import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; -import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.QueueOrderingPolicy; -import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; -import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy; -import org.apache.hadoop.yarn.util.Clock; -import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; -import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; -import org.apache.hadoop.yarn.util.resource.ResourceCalculator; -import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import org.apache.hadoop.yarn.util.resource.Resources; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.TreeSet; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.yarn.event.Event; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isA; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class ProportionalCapacityPreemptionPolicyMockFramework { - static final Logger LOG = LoggerFactory.getLogger( - TestProportionalCapacityPreemptionPolicyForNodePartitions.class); - final String ROOT = CapacitySchedulerConfiguration.ROOT; - - Map nameToCSQueues = null; - Map partitionToResource = null; - Map nodeIdToSchedulerNodes = null; - RMNodeLabelsManager nlm = null; - RMContext rmContext = null; - - ResourceCalculator rc = new DefaultResourceCalculator(); - Clock mClock = null; - CapacitySchedulerConfiguration conf = null; - CapacityScheduler cs = null; - @SuppressWarnings("rawtypes") - EventHandler mDisp = null; - ProportionalCapacityPreemptionPolicy policy = null; - Resource clusterResource = null; - // Initialize resource map - Map riMap = new HashMap<>(); - - private void resetResourceInformationMap() { - // Initialize mandatory resources - ResourceInformation memory = ResourceInformation.newInstance( - ResourceInformation.MEMORY_MB.getName(), - ResourceInformation.MEMORY_MB.getUnits(), - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB); - ResourceInformation vcores = ResourceInformation.newInstance( - ResourceInformation.VCORES.getName(), - ResourceInformation.VCORES.getUnits(), - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); - riMap.put(ResourceInformation.MEMORY_URI, memory); - riMap.put(ResourceInformation.VCORES_URI, vcores); - - ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); - } - - @SuppressWarnings("unchecked") - @Before - public void setup() { - resetResourceInformationMap(); - - org.apache.log4j.Logger.getRootLogger().setLevel( - org.apache.log4j.Level.DEBUG); - - conf = new CapacitySchedulerConfiguration(new Configuration(false)); - conf.setLong( - 
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000); - conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, - 3000); - // report "ideal" preempt - conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND, - (float) 1.0); - conf.setFloat( - CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR, - (float) 1.0); - - mClock = mock(Clock.class); - cs = mock(CapacityScheduler.class); - when(cs.getResourceCalculator()).thenReturn(rc); - when(cs.getPreemptionManager()).thenReturn(new PreemptionManager()); - when(cs.getConfiguration()).thenReturn(conf); - - nlm = mock(RMNodeLabelsManager.class); - mDisp = mock(EventHandler.class); - - rmContext = mock(RMContext.class); - when(rmContext.getNodeLabelManager()).thenReturn(nlm); - Dispatcher disp = mock(Dispatcher.class); - when(rmContext.getDispatcher()).thenReturn(disp); - when(disp.getEventHandler()).thenReturn(mDisp); - when(cs.getRMContext()).thenReturn(rmContext); - - partitionToResource = new HashMap<>(); - nodeIdToSchedulerNodes = new HashMap<>(); - nameToCSQueues = new HashMap<>(); - clusterResource = Resource.newInstance(0, 0); - } - - @After - public void cleanup() { - resetResourceInformationMap(); - } - - public void buildEnv(String labelsConfig, String nodesConfig, - String queuesConfig, String appsConfig) throws IOException { - buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, false); - } - - public void buildEnv(String labelsConfig, String nodesConfig, - String queuesConfig, String appsConfig, - boolean useDominantResourceCalculator) throws IOException { - if (useDominantResourceCalculator) { - when(cs.getResourceCalculator()).thenReturn( - new DominantResourceCalculator()); - } - mockNodeLabelsManager(labelsConfig); - mockSchedulerNodes(nodesConfig); - for (NodeId nodeId : nodeIdToSchedulerNodes.keySet()) { - when(cs.getSchedulerNode(nodeId)).thenReturn( - nodeIdToSchedulerNodes.get(nodeId)); - } - List allNodes = new ArrayList<>( - nodeIdToSchedulerNodes.values()); - when(cs.getAllNodes()).thenReturn(allNodes); - ParentQueue root = mockQueueHierarchy(queuesConfig); - when(cs.getRootQueue()).thenReturn(root); - when(cs.getClusterResource()).thenReturn(clusterResource); - mockApplications(appsConfig); - - policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, - mClock); - } - - public void updateQueueConfig(String queuesConfig) { - ParentQueue root = mockQueueHierarchy(queuesConfig); - when(cs.getRootQueue()).thenReturn(root); - } - - private void mockContainers(String containersConfig, FiCaSchedulerApp app, - ApplicationAttemptId attemptId, String queueName, - List reservedContainers, List liveContainers) { - int containerId = 1; - int start = containersConfig.indexOf("=") + 1; - int end = -1; - - Resource used = Resource.newInstance(0, 0); - Resource pending = Resource.newInstance(0, 0); - Priority pri = Priority.newInstance(0); - - while (start < containersConfig.length()) { - while (start < containersConfig.length() - && containersConfig.charAt(start) != '(') { - start++; - } - if (start >= containersConfig.length()) { - throw new IllegalArgumentException( - "Error containers specification, line=" + containersConfig); - } - end = start + 1; - while (end < containersConfig.length() - && containersConfig.charAt(end) != ')') { - end++; - } - if (end >= containersConfig.length()) { - throw new IllegalArgumentException( - "Error containers specification, line=" + containersConfig); - } - - // now we found start/end, get container values - 
String[] values = containersConfig.substring(start + 1, end).split(","); - if (values.length < 6 || values.length > 8) { - throw new IllegalArgumentException("Format to define container is:" - + "(priority,resource,host,expression,repeat,reserved, pending)"); - } - pri.setPriority(Integer.valueOf(values[0])); - Resource res = parseResourceFromString(values[1]); - NodeId host = NodeId.newInstance(values[2], 1); - String label = values[3]; - String userName = "user"; - int repeat = Integer.valueOf(values[4]); - boolean reserved = Boolean.valueOf(values[5]); - if (values.length >= 7) { - Resources.addTo(pending, parseResourceFromString(values[6])); - } - if (values.length == 8) { - userName = values[7]; - } - - for (int i = 0; i < repeat; i++) { - Container c = mock(Container.class); - Resources.addTo(used, res); - when(c.getResource()).thenReturn(res); - when(c.getPriority()).thenReturn(pri); - SchedulerRequestKey sk = SchedulerRequestKey.extractFrom(c); - RMContainerImpl rmc = mock(RMContainerImpl.class); - when(rmc.getAllocatedSchedulerKey()).thenReturn(sk); - when(rmc.getAllocatedNode()).thenReturn(host); - when(rmc.getNodeLabelExpression()).thenReturn(label); - when(rmc.getAllocatedResource()).thenReturn(res); - when(rmc.getContainer()).thenReturn(c); - when(rmc.getApplicationAttemptId()).thenReturn(attemptId); - when(rmc.getQueueName()).thenReturn(queueName); - final ContainerId cId = ContainerId.newContainerId(attemptId, - containerId); - when(rmc.getContainerId()).thenReturn(cId); - doAnswer(new Answer() { - @Override - public Integer answer(InvocationOnMock invocation) throws Throwable { - return cId.compareTo( - ((RMContainer) invocation.getArguments()[0]).getContainerId()); - } - }).when(rmc).compareTo(any(RMContainer.class)); - - if (containerId == 1) { - when(rmc.isAMContainer()).thenReturn(true); - when(app.getAMResource(label)).thenReturn(res); - when(app.getAppAMNodePartitionName()).thenReturn(label); - } - - if (reserved) { - reservedContainers.add(rmc); - when(rmc.getReservedResource()).thenReturn(res); - } else { - liveContainers.add(rmc); - } - - // Add container to scheduler-node - addContainerToSchedulerNode(host, rmc, reserved); - - // If this is a non-exclusive allocation - String partition = null; - if (label.isEmpty() - && !(partition = nodeIdToSchedulerNodes.get(host).getPartition()) - .isEmpty()) { - LeafQueue queue = (LeafQueue) nameToCSQueues.get(queueName); - Map> ignoreExclusivityContainers = queue - .getIgnoreExclusivityRMContainers(); - if (!ignoreExclusivityContainers.containsKey(partition)) { - ignoreExclusivityContainers.put(partition, - new TreeSet()); - } - ignoreExclusivityContainers.get(partition).add(rmc); - } - LOG.debug("add container to app=" + attemptId + " res=" + res + " node=" - + host + " nodeLabelExpression=" + label + " partition=" - + partition); - - containerId++; - } - - // If app has 0 container, and it has only pending, still make sure to - // update label. - if (repeat == 0) { - when(app.getAppAMNodePartitionName()).thenReturn(label); - } - - // Some more app specific aggregated data can be better filled here. - when(app.getPriority()).thenReturn(pri); - when(app.getUser()).thenReturn(userName); - when(app.getCurrentConsumption()).thenReturn(used); - when(app.getCurrentReservation()) - .thenReturn(Resources.createResource(0, 0)); - - Map pendingForDefaultPartition = - new HashMap(); - // Add for default partition for now. 
- pendingForDefaultPartition.put(label, pending); - when(app.getTotalPendingRequestsPerPartition()) - .thenReturn(pendingForDefaultPartition); - - // need to set pending resource in resource usage as well - ResourceUsage ru = Mockito.spy(new ResourceUsage()); - ru.setUsed(label, used); - when(ru.getCachedUsed(anyString())).thenReturn(used); - when(app.getAppAttemptResourceUsage()).thenReturn(ru); - when(app.getSchedulingResourceUsage()).thenReturn(ru); - - start = end + 1; - } - } - - /** - * Format is: - *

    -   * <pre>
    -   * queueName\t  // app1
    -   * (priority,resource,host,expression,#repeat,reserved)
    -   * (priority,resource,host,expression,#repeat,reserved);
    -   * queueName\t  // app2
    -   * </pre>
    - */ - private void mockApplications(String appsConfig) { - int id = 1; - HashMap> userMap = new HashMap>(); - HashMap>> userResourceUsagePerLabel = new HashMap<>(); - LeafQueue queue = null; - int mulp = -1; - for (String a : appsConfig.split(";")) { - String[] strs = a.split("\t"); - String queueName = strs[0]; - if (mulp <= 0 && strs.length > 2 && strs[2] != null) { - mulp = 100 / (new Integer(strs[2]).intValue()); - } - - // get containers - List liveContainers = new ArrayList(); - List reservedContainers = new ArrayList(); - ApplicationId appId = ApplicationId.newInstance(0L, id); - ApplicationAttemptId appAttemptId = ApplicationAttemptId - .newInstance(appId, 1); - - FiCaSchedulerApp app = mock(FiCaSchedulerApp.class); - when(app.getAMResource(anyString())) - .thenReturn(Resources.createResource(0, 0)); - mockContainers(strs[1], app, appAttemptId, queueName, reservedContainers, - liveContainers); - LOG.debug("Application mock: queue: " + queueName + ", appId:" + appId); - - when(app.getLiveContainers()).thenReturn(liveContainers); - when(app.getReservedContainers()).thenReturn(reservedContainers); - when(app.getApplicationAttemptId()).thenReturn(appAttemptId); - when(app.getApplicationId()).thenReturn(appId); - when(app.getQueueName()).thenReturn(queueName); - - // add to LeafQueue - queue = (LeafQueue) nameToCSQueues.get(queueName); - queue.getApplications().add(app); - queue.getAllApplications().add(app); - when(queue.getMinimumAllocation()) - .thenReturn(Resource.newInstance(1,1)); - when(app.getCSLeafQueue()).thenReturn(queue); - - HashSet users = userMap.get(queueName); - if (null == users) { - users = new HashSet(); - userMap.put(queueName, users); - } - users.add(app.getUser()); - - String label = app.getAppAMNodePartitionName(); - - // Get label to queue - HashMap> userResourceUsagePerQueue = userResourceUsagePerLabel - .get(label); - if (null == userResourceUsagePerQueue) { - userResourceUsagePerQueue = new HashMap<>(); - userResourceUsagePerLabel.put(label, userResourceUsagePerQueue); - } - - // Get queue to user based resource map - HashMap userResourceUsage = userResourceUsagePerQueue - .get(queueName); - if (null == userResourceUsage) { - userResourceUsage = new HashMap<>(); - userResourceUsagePerQueue.put(queueName, userResourceUsage); - } - - // Get user to its resource usage. - ResourceUsage usage = userResourceUsage.get(app.getUser()); - if (null == usage) { - usage = new ResourceUsage(); - userResourceUsage.put(app.getUser(), usage); - } - - usage.incAMUsed(app.getAMResource(label)); - usage.incUsed(app.getAppAttemptResourceUsage().getUsed(label)); - id++; - } - - for (String label : userResourceUsagePerLabel.keySet()) { - for (String queueName : userMap.keySet()) { - queue = (LeafQueue) nameToCSQueues.get(queueName); - // Currently we have user-limit test support only for default label. 
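// For reference, an illustrative entry for the appsConfig format documented above: each
// application is "queueName\t" followed by one "(priority,resource,host,expression,#repeat,reserved)"
// tuple per container group, with ";" separating applications. The queue and host names below are
// hypothetical sketch values and are not part of this patch:
//
    String exampleAppsConfig =
        "a\t(1,1:1,n1,,10,false);"    // app1 in queue a: 10 live <1 memory, 1 vcore> containers on n1, default partition
        + "b\t(1,1:1,n1,,5,true)";    // app2 in queue b: 5 reserved containers on n1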
- Resource totResoucePerPartition = partitionToResource.get(""); - Resource capacity = Resources.multiply(totResoucePerPartition, - queue.getQueueCapacities().getAbsoluteCapacity()); - HashSet users = userMap.get(queue.getQueueName()); - //TODO: Refactor this test class to use queue path internally like - // CS does from now on - if (users == null) { - users = userMap.get(queue.getQueuePath()); - } - when(queue.getAllUsers()).thenReturn(users); - Resource userLimit; - if (mulp > 0) { - userLimit = Resources.divideAndCeil(rc, capacity, mulp); - } else { - userLimit = Resources.divideAndCeil(rc, capacity, - users.size()); - } - LOG.debug("Updating user-limit from mock: totResoucePerPartition=" - + totResoucePerPartition + ", capacity=" + capacity - + ", users.size()=" + users.size() + ", userlimit= " + userLimit - + ",label= " + label + ",queueName= " + queueName); - - HashMap userResourceUsage = userResourceUsagePerLabel - .get(label).get(queueName); - for (String userName : users) { - User user = new User(userName); - if (userResourceUsage != null) { - user.setResourceUsage(userResourceUsage.get(userName)); - } - when(queue.getUser(eq(userName))).thenReturn(user); - when(queue.getResourceLimitForAllUsers(eq(userName), - any(Resource.class), anyString(), any(SchedulingMode.class))) - .thenReturn(userLimit); - } - } - } - } - - private void addContainerToSchedulerNode(NodeId nodeId, RMContainer container, - boolean isReserved) { - SchedulerNode node = nodeIdToSchedulerNodes.get(nodeId); - assert node != null; - - if (isReserved) { - when(node.getReservedContainer()).thenReturn(container); - } else { - node.getCopiedListOfRunningContainers().add(container); - Resources.subtractFrom(node.getUnallocatedResource(), - container.getAllocatedResource()); - } - } - - /** - * Format is: - * host1=partition[ res=resource]; - * host2=partition[ res=resource]; - */ - private void mockSchedulerNodes(String schedulerNodesConfigStr) - throws IOException { - String[] nodesConfigStrArray = schedulerNodesConfigStr.split(";"); - for (String p : nodesConfigStrArray) { - String[] arr = p.split(" "); - - NodeId nodeId = NodeId.newInstance(arr[0].substring(0, arr[0].indexOf("=")), 1); - String partition = arr[0].substring(arr[0].indexOf("=") + 1, arr[0].length()); - - FiCaSchedulerNode sn = mock(FiCaSchedulerNode.class); - when(sn.getNodeID()).thenReturn(nodeId); - when(sn.getPartition()).thenReturn(partition); - - Resource totalRes = Resources.createResource(0); - if (arr.length > 1) { - String res = arr[1]; - if (res.contains("res=")) { - String resSring = res.substring( - res.indexOf("res=") + "res=".length()); - totalRes = parseResourceFromString(resSring); - } - } - when(sn.getTotalResource()).thenReturn(totalRes); - when(sn.getUnallocatedResource()).thenReturn(Resources.clone(totalRes)); - - // TODO, add settings of killable resources when necessary - when(sn.getTotalKillableResources()).thenReturn(Resources.none()); - - List liveContainers = new ArrayList<>(); - when(sn.getCopiedListOfRunningContainers()).thenReturn(liveContainers); - - nodeIdToSchedulerNodes.put(nodeId, sn); - - LOG.debug("add scheduler node, id=" + nodeId + ", partition=" + partition); - } - } - - /** - * Format is: - *
    -   * <pre>
    -   * partition0=total_resource,exclusivity;
    -   * partition1=total_resource,exclusivity;
    -   * ...
    -   * </pre>
    - */ - private void mockNodeLabelsManager(String nodeLabelsConfigStr) throws IOException { - String[] partitionConfigArr = nodeLabelsConfigStr.split(";"); - clusterResource = Resources.createResource(0); - for (String p : partitionConfigArr) { - String partitionName = p.substring(0, p.indexOf("=")); - Resource res = parseResourceFromString(p.substring(p.indexOf("=") + 1, - p.indexOf(","))); - boolean exclusivity = - Boolean.valueOf(p.substring(p.indexOf(",") + 1, p.length())); - when(nlm.getResourceByLabel(eq(partitionName), any(Resource.class))) - .thenReturn(res); - when(nlm.isExclusiveNodeLabel(eq(partitionName))).thenReturn(exclusivity); - - // add to partition to resource - partitionToResource.put(partitionName, res); - LOG.debug("add partition=" + partitionName + " totalRes=" + res - + " exclusivity=" + exclusivity); - Resources.addTo(clusterResource, res); - } - - when(nlm.getClusterNodeLabelNames()).thenReturn( - partitionToResource.keySet()); - } - - private Resource parseResourceFromString(String p) { - String[] resource = p.split(":"); - Resource res; - if (resource.length == 1) { - res = Resources.createResource(Integer.valueOf(resource[0])); - } else { - res = Resources.createResource(Integer.valueOf(resource[0]), - Integer.valueOf(resource[1])); - if (resource.length > 2) { - // Using the same order of resources from ResourceUtils, set resource - // informations. - ResourceInformation[] storedResourceInfo = ResourceUtils - .getResourceTypesArray(); - for (int i = 2; i < resource.length; i++) { - res.setResourceInformation(storedResourceInfo[i].getName(), - ResourceInformation.newInstance(storedResourceInfo[i].getName(), - storedResourceInfo[i].getUnits(), - Integer.valueOf(resource[i]))); - } - } - } - return res; - } - - /** - * Format is: - *
    -   * <pre>
    -   * root (=[guaranteed max used pending (reserved)],=..);
    -   * -A(...);
    -   * --A1(...);
    -   * --A2(...);
    -   * -B...
    -   * </pre>
    - * ";" splits queues, and there should no empty lines, no extra spaces - * - * For each queue, it has configurations to specify capacities (to each - * partition), format is: - *
    -   * <pre>
    -   * - (=[guaranteed max used pending], \
    -   *               =[guaranteed max used pending])
    -   *              {key1=value1,key2=value2};  // Additional configs
    -   * </pre>
    - */ - @SuppressWarnings({ "unchecked", "rawtypes" }) - private ParentQueue mockQueueHierarchy(String queueExprs) { - String[] queueExprArray = queueExprs.split(";"); - ParentQueue rootQueue = null; - for (int idx = 0; idx < queueExprArray.length; idx++) { - String q = queueExprArray[idx]; - CSQueue queue; - - // Initialize queue - if (isParent(queueExprArray, idx)) { - ParentQueue parentQueue = mock(ParentQueue.class); - queue = parentQueue; - List children = new ArrayList(); - when(parentQueue.getChildQueues()).thenReturn(children); - QueueOrderingPolicy policy = mock(QueueOrderingPolicy.class); - when(policy.getConfigName()).thenReturn( - CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY); - when(parentQueue.getQueueOrderingPolicy()).thenReturn(policy); - } else { - LeafQueue leafQueue = mock(LeafQueue.class); - final TreeSet apps = new TreeSet<>( - new Comparator() { - @Override - public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) { - if (a1.getPriority() != null - && !a1.getPriority().equals(a2.getPriority())) { - return a1.getPriority().compareTo(a2.getPriority()); - } - - int res = a1.getApplicationId() - .compareTo(a2.getApplicationId()); - return res; - } - }); - when(leafQueue.getApplications()).thenReturn(apps); - when(leafQueue.getAllApplications()).thenReturn(apps); - OrderingPolicy so = mock(OrderingPolicy.class); - String opName = conf.get(CapacitySchedulerConfiguration.PREFIX - + CapacitySchedulerConfiguration.ROOT + "." + getQueueName(q) - + ".ordering-policy", "fifo"); - if (opName.equals("fair")) { - so = Mockito.spy(new FairOrderingPolicy()); - } - when(so.getPreemptionIterator()).thenAnswer(new Answer() { - public Object answer(InvocationOnMock invocation) { - return apps.descendingIterator(); - } - }); - when(leafQueue.getOrderingPolicy()).thenReturn(so); - - Map> ignorePartitionContainers = - new HashMap<>(); - when(leafQueue.getIgnoreExclusivityRMContainers()).thenReturn( - ignorePartitionContainers); - queue = leafQueue; - } - - ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - when(queue.getReadLock()).thenReturn(lock.readLock()); - setupQueue(queue, q, queueExprArray, idx); - if (queue.getQueuePath().equals(ROOT)) { - rootQueue = (ParentQueue) queue; - } - } - return rootQueue; - } - - private void setupQueue(CSQueue queue, String q, String[] queueExprArray, - int idx) { - LOG.debug("*** Setup queue, source=" + q); - String queuePath = null; - - int myLevel = getLevel(q); - if (0 == myLevel) { - // It's root - when(queue.getQueuePath()).thenReturn(ROOT); - queuePath = ROOT; - } - - String queueName = getQueueName(q); - when(queue.getQueueName()).thenReturn(queueName); - - // Setup parent queue, and add myself to parentQueue.children-list - ParentQueue parentQueue = getParentQueue(queueExprArray, idx, myLevel); - if (null != parentQueue) { - when(queue.getParent()).thenReturn(parentQueue); - parentQueue.getChildQueues().add(queue); - - // Setup my path - queuePath = parentQueue.getQueuePath() + "." 
+ queueName; - } - when(queue.getQueuePath()).thenReturn(queuePath); - - QueueCapacities qc = new QueueCapacities(0 == myLevel); - ResourceUsage ru = new ResourceUsage(); - QueueResourceQuotas qr = new QueueResourceQuotas(); - - when(queue.getQueueCapacities()).thenReturn(qc); - when(queue.getQueueResourceUsage()).thenReturn(ru); - when(queue.getQueueResourceQuotas()).thenReturn(qr); - - LOG.debug("Setup queue, short name=" + queue.getQueueName() + " path=" - + queue.getQueuePath()); - LOG.debug("Parent=" + (parentQueue == null ? "null" : parentQueue - .getQueuePath())); - - // Setup other fields like used resource, guaranteed resource, etc. - String capacitySettingStr = q.substring(q.indexOf("(") + 1, q.indexOf(")")); - for (String s : capacitySettingStr.split(",")) { - String partitionName = s.substring(0, s.indexOf("=")); - String[] values = s.substring(s.indexOf("[") + 1, s.indexOf("]")).split(" "); - // Add a small epsilon to capacities to avoid truncate when doing - // Resources.multiply - float epsilon = 1e-6f; - Resource totResoucePerPartition = partitionToResource.get(partitionName); - float absGuaranteed = Resources.divide(rc, totResoucePerPartition, - parseResourceFromString(values[0].trim()), totResoucePerPartition) - + epsilon; - float absMax = Resources.divide(rc, totResoucePerPartition, - parseResourceFromString(values[1].trim()), totResoucePerPartition) - + epsilon; - float absUsed = Resources.divide(rc, totResoucePerPartition, - parseResourceFromString(values[2].trim()), totResoucePerPartition) - + epsilon; - float used = Resources.divide(rc, totResoucePerPartition, - parseResourceFromString(values[2].trim()), - parseResourceFromString(values[0].trim())) + epsilon; - Resource pending = parseResourceFromString(values[3].trim()); - qc.setAbsoluteCapacity(partitionName, absGuaranteed); - qc.setAbsoluteMaximumCapacity(partitionName, absMax); - qc.setAbsoluteUsedCapacity(partitionName, absUsed); - qc.setUsedCapacity(partitionName, used); - qr.setEffectiveMaxResource(parseResourceFromString(values[1].trim())); - qr.setEffectiveMinResource(parseResourceFromString(values[0].trim())); - qr.setEffectiveMaxResource(partitionName, - parseResourceFromString(values[1].trim())); - qr.setEffectiveMinResource(partitionName, - parseResourceFromString(values[0].trim())); - when(queue.getUsedCapacity()).thenReturn(used); - when(queue.getEffectiveCapacity(partitionName)) - .thenReturn(parseResourceFromString(values[0].trim())); - when(queue.getEffectiveMaxCapacity(partitionName)) - .thenReturn(parseResourceFromString(values[1].trim())); - ru.setPending(partitionName, pending); - // Setup reserved resource if it contained by input config - Resource reserved = Resources.none(); - if(values.length == 5) { - reserved = parseResourceFromString(values[4].trim()); - ru.setReserved(partitionName, reserved); - } - if (!isParent(queueExprArray, idx)) { - LeafQueue lq = (LeafQueue) queue; - when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), - isA(String.class), eq(false))).thenReturn(pending); - when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), - isA(String.class), eq(true))).thenReturn( - Resources.subtract(pending, reserved)); - } - ru.setUsed(partitionName, parseResourceFromString(values[2].trim())); - - LOG.debug("Setup queue=" + queueName + " partition=" + partitionName - + " [abs_guaranteed=" + absGuaranteed + ",abs_max=" + absMax - + ",abs_used" + absUsed + ",pending_resource=" + pending - + ", reserved_resource=" + reserved + "]"); - } - - // Setup 
preemption disabled - when(queue.getPreemptionDisabled()).thenReturn( - conf.getPreemptionDisabled(queuePath, false)); - - // Setup other queue configurations - Map otherConfigs = getOtherConfigurations( - queueExprArray[idx]); - if (otherConfigs.containsKey("priority")) { - when(queue.getPriority()).thenReturn( - Priority.newInstance(Integer.valueOf(otherConfigs.get("priority")))); - } else { - // set queue's priority to 0 by default - when(queue.getPriority()).thenReturn(Priority.newInstance(0)); - } - - // Setup disable preemption of queues - if (otherConfigs.containsKey("disable_preemption")) { - when(queue.getPreemptionDisabled()).thenReturn( - Boolean.valueOf(otherConfigs.get("disable_preemption"))); - } - - //TODO: Refactor this test class to use queue path internally like CS - // does from now on - nameToCSQueues.put(queuePath, queue); - nameToCSQueues.put(queueName, queue); - when(cs.getQueue(eq(queuePath))).thenReturn(queue); - when(cs.getQueue(eq(queueName))).thenReturn(queue); - when(cs.normalizeQueueName(eq(queuePath))).thenReturn(queuePath); - when(cs.normalizeQueueName(eq(queueName))).thenReturn(queuePath); - } - - /** - * Get additional queue's configurations - * @param queueExpr queue expr - * @return maps of configs - */ - private Map getOtherConfigurations(String queueExpr) { - if (queueExpr.contains("{")) { - int left = queueExpr.indexOf('{'); - int right = queueExpr.indexOf('}'); - - if (right > left) { - Map configs = new HashMap<>(); - - String subStr = queueExpr.substring(left + 1, right); - for (String kv : subStr.split(",")) { - if (kv.contains("=")) { - String key = kv.substring(0, kv.indexOf("=")); - String value = kv.substring(kv.indexOf("=") + 1); - configs.put(key, value); - } - } - - return configs; - } - } - - return Collections.emptyMap(); - } - - /** - * Level of a queue is how many "-" at beginning, root's level is 0 - */ - private int getLevel(String q) { - int level = 0; // level = how many "-" at beginning - while (level < q.length() && q.charAt(level) == '-') { - level++; - } - return level; - } - - private String getQueueName(String q) { - int idx = 0; - // find first != '-' char - while (idx < q.length() && q.charAt(idx) == '-') { - idx++; - } - if (idx == q.length()) { - throw new IllegalArgumentException("illegal input:" + q); - } - // name = after '-' and before '(' - String name = q.substring(idx, q.indexOf('(')); - if (name.isEmpty()) { - throw new IllegalArgumentException("queue name shouldn't be empty:" + q); - } - if (name.contains(".")) { - throw new IllegalArgumentException("queue name shouldn't contain '.':" - + name); - } - return name; - } - - private ParentQueue getParentQueue(String[] queueExprArray, int idx, int myLevel) { - idx--; - while (idx >= 0) { - int level = getLevel(queueExprArray[idx]); - if (level < myLevel) { - String parentQueuName = getQueueName(queueExprArray[idx]); - return (ParentQueue) nameToCSQueues.get(parentQueuName); - } - idx--; - } - - return null; - } - - /** - * Get if a queue is ParentQueue - */ - private boolean isParent(String[] queues, int idx) { - int myLevel = getLevel(queues[idx]); - idx++; - while (idx < queues.length && getLevel(queues[idx]) == myLevel) { - idx++; - } - if (idx >= queues.length || getLevel(queues[idx]) < myLevel) { - // It's a LeafQueue - return false; - } else { - return true; - } - } - - public ApplicationAttemptId getAppAttemptId(int id) { - ApplicationId appId = ApplicationId.newInstance(0L, id); - ApplicationAttemptId appAttemptId = - ApplicationAttemptId.newInstance(appId, 
1); - return appAttemptId; - } - - public void checkContainerNodesInApp(FiCaSchedulerApp app, - int expectedContainersNumber, String host) { - NodeId nodeId = NodeId.newInstance(host, 1); - int num = 0; - for (RMContainer c : app.getLiveContainers()) { - if (c.getAllocatedNode().equals(nodeId)) { - num++; - } - } - for (RMContainer c : app.getReservedContainers()) { - if (c.getAllocatedNode().equals(nodeId)) { - num++; - } - } - Assert.assertEquals(expectedContainersNumber, num); - } - - public FiCaSchedulerApp getApp(String queueName, int appId) { - for (FiCaSchedulerApp app : ((LeafQueue) cs.getQueue(queueName)) - .getApplications()) { - if (app.getApplicationId().getId() == appId) { - return app; - } - } - return null; - } - - public void checkAbsCapacities(CSQueue queue, String partition, - float guaranteed, float max, float used) { - QueueCapacities qc = queue.getQueueCapacities(); - Assert.assertEquals(guaranteed, qc.getAbsoluteCapacity(partition), 1e-3); - Assert.assertEquals(max, qc.getAbsoluteMaximumCapacity(partition), 1e-3); - Assert.assertEquals(used, qc.getAbsoluteUsedCapacity(partition), 1e-3); - } - - public void checkPendingResource(CSQueue queue, String partition, int pending) { - ResourceUsage ru = queue.getQueueResourceUsage(); - Assert.assertEquals(pending, ru.getPending(partition).getMemorySize()); - } - - public void checkPriority(CSQueue queue, int expectedPriority) { - Assert.assertEquals(expectedPriority, queue.getPriority().getPriority()); - } - - public void checkReservedResource(CSQueue queue, String partition, int reserved) { - ResourceUsage ru = queue.getQueueResourceUsage(); - Assert.assertEquals(reserved, ru.getReserved(partition).getMemorySize()); - } - - static class IsPreemptionRequestForQueueAndNode - implements ArgumentMatcher { - private final ApplicationAttemptId appAttId; - private final String queueName; - private final NodeId nodeId; - - IsPreemptionRequestForQueueAndNode(ApplicationAttemptId appAttId, - String queueName, NodeId nodeId) { - this.appAttId = appAttId; - this.queueName = queueName; - this.nodeId = nodeId; - } - @Override - public boolean matches(ContainerPreemptEvent cpe) { - return appAttId.equals(cpe.getAppId()) - && queueName.equals(cpe.getContainer().getQueueName()) - && nodeId.equals(cpe.getContainer().getAllocatedNode()); - } - @Override - public String toString() { - return appAttId.toString(); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java index f9fda99d80944..1f82617543fd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java @@ -20,6 +20,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; +import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; @@ -39,7 +40,7 @@ public class TestPreemptionForQueueWithPriorities extends ProportionalCapacityPreemptionPolicyMockFramework { @Before public void setup() { - rc = new DefaultResourceCalculator(); + resourceCalculator = new DefaultResourceCalculator(); super.setup(); policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock); } @@ -82,13 +83,13 @@ public void testPreemptionForHighestPriorityUnderutilizedQueue() // 10 preempted from app1, 15 preempted from app2, and nothing preempted // from app3 - verify(mDisp, times(10)).handle(argThat( + verify(eventHandler, times(10)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(15)).handle(argThat( + verify(eventHandler, times(15)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -135,13 +136,13 @@ public void testPreemptionForLowestPriorityUnderutilizedQueue() // 10 preempted from app1, 15 preempted from app2, and nothing preempted // from app3 - verify(mDisp, times(10)).handle(argThat( + verify(eventHandler, times(10)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(15)).handle(argThat( + verify(eventHandler, times(15)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -185,10 +186,10 @@ public void testPreemptionWontHappenBetweenSatisfiedQueues() policy.editSchedule(); // Nothing preempted - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -236,16 +237,16 @@ public void testPreemptionForMultipleQueuesInTheSamePriorityBuckets() // from app3/app4 // (After preemption, a has 35 - 23 = 12, b has 25 - 6 = 19, so a:b after // preemption is 1.58, close to 1.50) - verify(mDisp, times(23)).handle(argThat( + verify(eventHandler, times(23)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(6)).handle(argThat( + verify(eventHandler, times(6)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new 
TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); } @@ -295,16 +296,16 @@ public void testPreemptionForPriorityAndDisablePreemption() // We suppose to preempt some resource from A, but now since queueA // disables preemption, so we need to preempt some resource from B and // some from C even if C has higher priority than A - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(19)).handle(argThat( + verify(eventHandler, times(19)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); } @@ -352,16 +353,16 @@ public void testPriorityPreemptionForHierarchicalOfQueues() // Preemption should first divide capacities between a / b, and b2 should // get less preemption than b1 (because b2 has higher priority) - verify(mDisp, times(6)).handle(argThat( + verify(eventHandler, times(6)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(13)).handle(argThat( + verify(eventHandler, times(13)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, times(10)).handle(argThat( + verify(eventHandler, times(10)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); } @@ -408,16 +409,16 @@ public void testPriorityPreemptionWithMandatoryResourceForHierarchicalOfQueues() // Preemption should first divide capacities between a / b, and b1 should // get less preemption than b2 (because b1 has higher priority) - verify(mDisp, times(3)).handle(argThat( + verify(eventHandler, times(3)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, times(2)).handle(argThat( + verify(eventHandler, times(2)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); } @@ -474,23 +475,23 @@ public void testPriorityPreemptionWithMultipleResource() // get less preemption than a1 (because a2 has higher priority). More // specifically, a2 will not get preempted since the resource preempted // from a1 can satisfy b already. 
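// For reference, the hunks that follow all reduce to the same shape once the mock framework has
// moved to the mockframework package and renamed its fields (rc -> resourceCalculator,
// mDisp -> eventHandler). A condensed sketch of that pattern, with placeholder config strings and
// illustrative times(...) counts that vary per test, looks roughly like:
//
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    verify(eventHandler, times(10)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(eventHandler, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));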
- verify(mDisp, times(7)).handle(argThat( + verify(eventHandler, times(7)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @Test public void test3ResourceTypesInterQueuePreemption() throws IOException { - rc = new DominantResourceCalculator(); - when(cs.getResourceCalculator()).thenReturn(rc); + resourceCalculator = new DominantResourceCalculator(); + when(cs.getResourceCalculator()).thenReturn(resourceCalculator); // Initialize resource map String RESOURCE_1 = "res1"; @@ -535,7 +536,7 @@ public void test3ResourceTypesInterQueuePreemption() throws IOException { buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -589,10 +590,10 @@ public void testPriorityPreemptionForBalanceBetweenSatisfiedQueues() // b and c has same relativeAssigned=1.0f(idealAssigned / guaranteed), // since c has higher priority, c will be put in mostUnderServedQueue and // get all remain 30 capacity. - verify(mDisp, times(10)).handle(argThat( + verify(eventHandler, times(10)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java index 0300e19934d30..96e91d5945b7e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java @@ -22,6 +22,7 @@ import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Before; import org.junit.Test; @@ -90,13 +91,13 @@ public void testNodePartitionPreemptionRespectGuaranteedCapacity() // 30 preempted from app1, 30 preempted from app4, and nothing preempted // from 
app2/app3 - verify(mDisp, times(30)).handle( + verify(eventHandler, times(30)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, times(30)).handle( + verify(eventHandler, times(30)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(4)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); } @@ -146,9 +147,9 @@ public void testNodePartitionPreemptionNotHappenBetweenSatisfiedQueues() policy.editSchedule(); // No preemption happens - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); } @@ -195,9 +196,9 @@ public void testNodePartitionPreemptionOfIgnoreExclusivityAndRespectCapacity() // 30 preempted from app1, 30 preempted from app4, and nothing preempted // from app2/app3 - verify(mDisp, times(20)).handle( + verify(eventHandler, times(20)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, times(30)).handle( + verify(eventHandler, times(30)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); } @@ -247,16 +248,16 @@ public void testNodePartitionPreemptionOfSkippingAMContainer() policy.editSchedule(); // 4 from app1 - verify(mDisp, times(4)).handle( + verify(eventHandler, times(4)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); // 19 from app2-app5 - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(4)))); - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(5)))); } @@ -308,16 +309,16 @@ public void testNodePartitionPreemptionOfAMContainer() policy.editSchedule(); // 4 from app1 - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); // 19 from app2-app5 - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, times(19)).handle( + verify(eventHandler, times(19)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); - verify(mDisp, times(20)).handle( + verify(eventHandler, times(20)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(4)))); - verify(mDisp, times(20)).handle( + verify(eventHandler, times(20)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(5)))); } @@ -370,11 +371,11 @@ public void testNodePartitionDisablePreemptionForSingleLevelQueue() policy.editSchedule(); // 10 preempted from app1, nothing preempted from app2-app3 - verify(mDisp, times(10)).handle( + verify(eventHandler, times(10)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( 
argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); } @@ -429,11 +430,11 @@ public void testNodePartitionNonAccessibleQueuesSharePartitionedResource() policy.editSchedule(); // 15 will be preempted app2/app3 - verify(mDisp, times(15)).handle( + verify(eventHandler, times(15)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, times(15)).handle( + verify(eventHandler, times(15)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); } @@ -499,13 +500,13 @@ public void testHierarchyPreemptionForMultiplePartitions() policy.editSchedule(); // 10 will be preempted from app1 (a1) /app4 (b2) - verify(mDisp, times(10)).handle( + verify(eventHandler, times(10)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, times(10)).handle( + verify(eventHandler, times(10)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(4)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); } @@ -559,9 +560,9 @@ public void testHierarchyPreemptionForDifferenceAcessibility() buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(50)).handle( + verify(eventHandler, times(50)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, times(30)).handle( + verify(eventHandler, times(30)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); } @@ -610,13 +611,13 @@ public void testNodePartitionPreemptionWithVCoreResource() throws IOException { // 30 preempted from app1, 30 preempted from app4, and nothing preempted // from app2/app3 - verify(mDisp, times(30)).handle( + verify(eventHandler, times(30)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, times(30)).handle( + verify(eventHandler, times(30)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(4)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, never()).handle( + verify(eventHandler, never()).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); } @@ -683,7 +684,7 @@ public void testNormalizeGuaranteeWithMultipleResource() throws IOException { buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(7)).handle( + verify(eventHandler, times(7)).handle( argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); riMap.remove(RESOURCE_1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForReservedContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForReservedContainers.java index 5410931193c7a..6c723493ced1d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForReservedContainers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForReservedContainers.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.junit.Before; import org.junit.Test; @@ -82,14 +83,14 @@ public void testPreemptionForSimpleReservedContainer() throws IOException { // Total 5 preempted from app1 at n1, don't preempt container from other // app/node - verify(mDisp, times(5)).handle(argThat( + verify(eventHandler, times(5)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(5)).handle( + verify(eventHandler, times(5)).handle( argThat(new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "root.a", NodeId.newInstance("n1", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -136,16 +137,16 @@ public void testUseReservedAndFifoSelectorTogether() throws IOException { buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(15)).handle(argThat( + verify(eventHandler, times(15)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(10)).handle( + verify(eventHandler, times(10)).handle( argThat(new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n1", 1)))); - verify(mDisp, times(5)).handle( + verify(eventHandler, times(5)).handle( argThat(new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n2", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -198,16 +199,16 @@ public void testReservedSelectorSkipsAMContainer() throws IOException { buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(15)).handle(argThat( + verify(eventHandler, times(15)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle( + verify(eventHandler, times(0)).handle( argThat(new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n1", 1)))); - verify(mDisp, times(15)).handle( + verify(eventHandler, times(15)).handle( argThat(new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n2", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -256,10 +257,10 @@ public void testPreemptionForReservedContainerRespectGuaranteedResource() 
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -308,10 +309,10 @@ public void testPreemptionForReservedContainerWhichHasAvailableResource() // Total 4 preempted from app1 at n1, don't preempt container from other // app/node - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "root.a", NodeId.newInstance("n1", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "root.a", NodeId.newInstance("n2", 1)))); } @@ -361,10 +362,10 @@ public void testPreemptionForReservedContainerWhichHasNondivisibleAvailableResou // Total 4 preempted from app1 at n1, don't preempt container from other // app/node - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "root.a", NodeId.newInstance("n1", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "root.a", NodeId.newInstance("n2", 1)))); } @@ -415,16 +416,16 @@ public void testPreemptionForReservedContainerRespectAvailableResources() policy.editSchedule(); // No preemption should happen - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n1", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n2", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n3", 1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new IsPreemptionRequestForQueueAndNode(getAppAttemptId(1), "a", NodeId.newInstance("n4", 1)))); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java index 7e3d6866ffdd4..27208020185f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java @@ -19,8 +19,11 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Before; @@ -28,8 +31,10 @@ import java.io.IOException; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -40,8 +45,8 @@ public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF @Before public void setup() { super.setup(); - rc = new DominantResourceCalculator(); - when(cs.getResourceCalculator()).thenReturn(rc); + resourceCalculator = new DominantResourceCalculator(); + when(cs.getResourceCalculator()).thenReturn(resourceCalculator); policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock); } @@ -75,10 +80,10 @@ public void testInterQueuePreemptionWithMultipleResource() throws Exception { policy.editSchedule(); // Preemption should happen in Queue b, preempt <10,20> to Queue a - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(5)).handle(argThat( + verify(eventHandler, times(5)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -121,7 +126,7 @@ public void testInterQueuePreemptionWithNaturalTerminationFactor() buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -166,11 +171,79 @@ public void test3ResourceTypesInterQueuePreemption() throws IOException { buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } + + @SuppressWarnings("unchecked") + @Test + public void testInterQueuePreemptionWithStrictAndRelaxedDRF() + throws IOException { + + /* + * root + * / \ \ + * a b c + * + * A / B / C have 33.3 / 33.3 / 33.4 resources + * Total cluster resource have mem=61440, cpu=600 + * + * +=================+========================+ + * | used in queue a | user limit for queue a | + * +=================+========================+ + * | 61440:60 | 20480:200 | + * +=================+========================+ + * In this case, the used memory is over the user limit but the used vCores + * is not. 
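+     * (That is, 61440 MB used is above the 20480 MB user limit, while 60 vCores
+     * used is below the 200 vCore limit.)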
If conservative DRF is true, preemptions will not occur. + * If conservative DRF is false (default) preemptions will occur. + */ + String labelsConfig = "=61440:600,true;"; + String nodesConfig = "n1= res=61440:600"; // n1 is default partition + String queuesConfig = + // guaranteed,max,used,pending,reserved + "root(=[61440:600 61440:600 61440:600 20480:20 0]);" + // root + "-a(=[20480:200 61440:600 61440:60 0:0 0]);" + // b + "-b(=[20480:200 61440:600 0:0 20480:20 0]);" + // a + "-c(=[20480:200 61440:600 0:0 0:0 0])"; // c + String appsConfig = + //queueName\t(priority,resource,host,expression,#repeat,reserved) + "a\t" + "(1,1024:1,n1,,60,false,0:0,user1);" + // app1 in a + "b\t" + "(1,0:0,n1,,0,false,20480:20,user2);"; // app2 in b + + conf.setBoolean( + CapacitySchedulerConfiguration.CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + true); + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + Resource ul = Resource.newInstance(20480, 20); + when(((LeafQueue)(cs.getQueue("root.a"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(0)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + + reset(eventHandler); + + conf.setBoolean( + CapacitySchedulerConfiguration.CROSS_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + false); + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + ul = Resource.newInstance(20480, 20); + when(((LeafQueue)(cs.getQueue("root.a"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(20)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java index e2336fb1e4438..807796242744e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueue.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.junit.Before; import org.junit.Test; @@ -35,7 +36,7 @@ */ public class TestProportionalCapacityPreemptionPolicyIntraQueue extends - ProportionalCapacityPreemptionPolicyMockFramework { + ProportionalCapacityPreemptionPolicyMockFramework { @Before public void setup() { super.setup(); @@ -101,10 +102,10 @@ 
public void testSimpleIntraQueuePreemption() throws IOException { // For queue B, app3 and app4 were of lower priority. Hence take 8 // containers from them by hitting the intraQueuePreemptionDemand of 20%. - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(7)).handle(argThat( + verify(eventHandler, times(7)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -156,10 +157,10 @@ public void testNoIntraQueuePreemptionWithPreemptionDisabledOnQueues() buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -215,16 +216,16 @@ public void testNoPreemptionForSamePriorityApps() throws IOException { policy.editSchedule(); // For queue B, none of the apps should be preempted. - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(5)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(6)))); } @@ -272,16 +273,16 @@ public void testNoPreemptionWhenQueueIsUnderCapacityLimit() // For queue A/B, none of the apps should be preempted as used capacity // is under 50%. - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); } @@ -337,7 +338,7 @@ public void testLimitPreemptionWithMaxIntraQueuePreemptableLimit() // For queueB, eventhough app4 needs 100 resources, only 30 resources were // preempted. (max is 50% of guaranteed cap of any queue // "maxIntraQueuePreemptable") - verify(mDisp, times(30)).handle(argThat( + verify(eventHandler, times(30)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -391,7 +392,7 @@ public void testLimitPreemptionWithTotalPreemptedResourceAllowed() // For queue B eventhough app4 needs 100 resources, only 10 resources were // preempted. 
This is the 10% limit of TOTAL_PREEMPTION_PER_ROUND. - verify(mDisp, times(10)).handle(argThat( + verify(eventHandler, times(10)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -448,10 +449,10 @@ public void testAlreadySelectedContainerFromInterQueuePreemption() // As per intra queue preemption algorithm, 20 more containers were needed // for app2 (in queue a). Inter queue pre-emption had already preselected 9 // containers and hence preempted only 11 more. - verify(mDisp, times(20)).handle(argThat( + verify(eventHandler, times(20)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -502,10 +503,10 @@ public void testSkipAMContainersInInterQueuePreemption() throws IOException { policy.editSchedule(); // Ensure that only 9 containers are preempted from app2 (sparing 1 AM) - verify(mDisp, times(11)).handle(argThat( + verify(eventHandler, times(11)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -552,10 +553,10 @@ public void testSkipAMContainersInInterQueuePreemptionSingleApp() policy.editSchedule(); // Make sure that app1's Am container is spared. Only 9/10 is preempted. - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, never()).handle(argThat( + verify(eventHandler, never()).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -595,7 +596,7 @@ public void testNoPreemptionForSingleApp() throws IOException { policy.editSchedule(); // Ensure there are 0 preemptions since only one app is running in queue. - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -638,13 +639,13 @@ public void testOverutilizedQueueResourceWithInterQueuePreemption() policy.editSchedule(); // Complete demand request from QueueB for 20 resource must be preempted. 
- verify(mDisp, times(20)).handle(argThat( + verify(eventHandler, times(20)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -703,11 +704,11 @@ public void testNodePartitionIntraQueuePreemption() throws IOException { policy.editSchedule(); // 20 preempted from app1 - verify(mDisp, times(20)) + verify(eventHandler, times(20)) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, never()) + verify(eventHandler, never()) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); - verify(mDisp, never()) + verify(eventHandler, never()) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(3)))); } @@ -785,26 +786,26 @@ public void testComplexIntraQueuePreemption() throws IOException { // High priority app in queueA has 30 resource demand. But low priority // app has only 5 resource. Hence preempt 4 here sparing AM. - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); // Multiple high priority apps has demand of 17. This will be preempted // from another set of low priority apps. - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(6)))); - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(5)))); // Only 3 resources will be freed in this round for queue C as we // are trying to save AM container. - verify(mDisp, times(2)).handle(argThat( + verify(eventHandler, times(2)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(10)))); - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(11)))); } @@ -861,7 +862,7 @@ public void testIntraQueuePreemptionWithTwoUsers() // 14 more (5 is already running) eventhough demand is for 30. Ideally we // must preempt 15. But 15th container will bring user1's usage to 20 which // is same as user-limit. Hence skip 15th container. - verify(mDisp, times(14)).handle(argThat( + verify(eventHandler, times(14)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -927,19 +928,19 @@ public void testComplexNodePartitionIntraQueuePreemption() // Label X: app3 has demand of 20 for label X. Hence app2 will loose // 4 (sparing AM) and 16 more from app1 till preemption limit is met. 
- verify(mDisp, times(16)) + verify(eventHandler, times(16)) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(1)))); - verify(mDisp, times(4)) + verify(eventHandler, times(4)) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(2)))); // Default Label:For a demand of 30, preempt from all low priority // apps of default label. 25 will be preempted as preemption limit is // met. - verify(mDisp, times(1)) + verify(eventHandler, times(1)) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(8)))); - verify(mDisp, times(2)) + verify(eventHandler, times(2)) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(7)))); - verify(mDisp, times(22)) + verify(eventHandler, times(22)) .handle(argThat(new IsPreemptionRequestFor(getAppAttemptId(6)))); } @@ -1017,10 +1018,10 @@ public void testIntraQueuePreemptionAfterQueueDropped() // For queue B, app3 and app4 were of lower priority. Hence take 8 // containers from them by hitting the intraQueuePreemptionDemand of 20%. - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(7)).handle(argThat( + verify(eventHandler, times(7)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java index 6e56cb5a24c3b..eb9d21836da51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java @@ -24,6 +24,7 @@ import java.io.IOException; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.junit.Before; import org.junit.Test; @@ -85,7 +86,7 @@ public void testIntraQueuePreemptionFairOrderingPolicyEnabledOneAppPerUser() buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(20)).handle(argThat( + verify(eventHandler, times(20)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -133,7 +134,7 @@ public void testIntraQueuePreemptionFifoOrderingPolicyEnabled() buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(5)).handle(argThat( + verify(eventHandler, times(5)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); @@ -156,7 +157,7 @@ public void testIntraQueuePreemptionFifoOrderingPolicyEnabled() buildEnv(labelsConfig, nodesConfig, queuesConfig, 
appsConfig); policy.editSchedule(); - verify(mDisp, times(15)).handle(argThat( + verify(eventHandler, times(15)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -208,7 +209,7 @@ public void testIntraQueuePreemptionFairOrderingPolicyMulitipleAppsPerUser() buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); policy.editSchedule(); - verify(mDisp, times(20)).handle(argThat( + verify(eventHandler, times(20)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -258,18 +259,18 @@ public void testIntraQueuePreemptionFifoOrderingPolicyMultipleAppsPerUser() // app3 is the younges and also over its user limit. 5 should be preempted // from app3 until it comes down to user3's user limit. - verify(mDisp, times(5)).handle(argThat( + verify(eventHandler, times(5)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); // User1's app2 is its youngest. 19 should be preempted from app2, leaving // only the AM - verify(mDisp, times(19)).handle(argThat( + verify(eventHandler, times(19)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); // Preempt the remaining resource from User1's oldest app1. - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java index ba5f4d157347a..9fb08cd3bc041 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.junit.Before; import org.junit.Test; @@ -33,7 +34,7 @@ */ public class TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit extends - ProportionalCapacityPreemptionPolicyMockFramework { + ProportionalCapacityPreemptionPolicyMockFramework { @Before public void setup() { super.setup(); @@ -94,7 +95,7 @@ public void testSimpleIntraQueuePreemptionWithTwoUsers() // app2 needs more resource and its well under its user-limit. Hence preempt // resources from app1. 
- verify(mDisp, times(30)).handle(argThat( + verify(eventHandler, times(30)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -149,7 +150,7 @@ public void testNoIntraQueuePreemptionWithSingleUser() // app2 needs more resource. Since app1,2 are from same user, there wont be // any preemption. - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -206,7 +207,7 @@ public void testNoIntraQueuePreemptionWithTwoUserUnderUserLimit() // app2 needs more resource. Since app1,2 are from same user, there wont be // any preemption. - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -262,7 +263,7 @@ public void testSimpleIntraQueuePreemptionWithTwoUsersWithAppPriority() // app2 needs more resource and its well under its user-limit. Hence preempt // resources from app1 even though its priority is more than app2. - verify(mDisp, times(30)).handle(argThat( + verify(eventHandler, times(30)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -323,7 +324,7 @@ public void testIntraQueuePreemptionOfUserLimitWithMultipleApps() // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app3 (compare to app1, app3 has low priority). - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } @@ -384,16 +385,16 @@ public void testNoPreemptionOfUserLimitWithMultipleAppsAndSameUser() // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app3 (compare to app1, app3 has low priority). - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); } @@ -451,7 +452,7 @@ public void testIntraQueuePreemptionOfUserLimitWitAppsOfDifferentPriority() // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app1 (compare to app3, app1 has low priority). - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -508,10 +509,10 @@ public void testIntraQueuePreemptionOfUserLimitInTwoQueues() // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app1 (compare to app3, app1 has low priority). 
- verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(5)))); } @@ -566,10 +567,10 @@ public void testIntraQueuePreemptionWithTwoRequestingUsers() // app2 needs more resource and its well under its user-limit. Hence preempt // resources from app1. - verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -626,10 +627,10 @@ public void testNoIntraQueuePreemptionIfBelowUserLimitAndLowPriorityExtraUsers() // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app1 (compare to app3, app1 has low priority). - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -686,10 +687,10 @@ public void testNoIntraQueuePreemptionIfBelowUserLimitAndSamePriorityExtraUsers( // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app1 (compare to app3, app1 has low priority). - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -746,10 +747,10 @@ public void testNoIntraQueuePreemptionIfBelowUserLimitAndHighPriorityExtraUsers( // app2/app4 needs more resource and its well under its user-limit. Hence // preempt resources from app1 (compare to app3, app1 has low priority). - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -806,10 +807,10 @@ public void testNoIntraQueuePreemptionWithUserLimitDeadzone() // app2 needs more resource and its well under its user-limit. Hence preempt // 3 resources (9GB) from app1. We will not preempt last container as it may // pull user's usage under its user-limit. 
- verify(mDisp, times(3)).handle(argThat( + verify(eventHandler, times(3)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); } @@ -868,10 +869,10 @@ public void testIntraQueuePreemptionWithUserLimitDeadzoneAndPriority() // app2 needs more resource and its well under its user-limit. Hence preempt // 3 resources (9GB) from app1. We will not preempt last container as it may // pull user's usage under its user-limit. - verify(mDisp, times(3)).handle(argThat( + verify(eventHandler, times(3)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); - verify(mDisp, times(0)).handle(argThat( + verify(eventHandler, times(0)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); @@ -892,7 +893,7 @@ public void testIntraQueuePreemptionWithUserLimitDeadzoneAndPriority() // app2 has priority demand within same user 'user1'. However user1's used // is alredy under UL. Hence no preemption. We will still get 3 container // while asserting as it was aleady selected in earlier round. - verify(mDisp, times(3)).handle(argThat( + verify(eventHandler, times(3)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } @@ -927,7 +928,7 @@ public void testSimpleIntraQueuePreemptionOneUserUnderOneUserAtOneUserAbove() // app2 is right at its user limit and app1 needs one resource. Should // preempt 1 container. - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java index 1f744877eb5c1..ee88d3161b2e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java @@ -18,15 +18,20 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.junit.Before; import org.junit.Test; import java.io.IOException; +import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -35,14 +40,14 @@ */ public class TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF extends - ProportionalCapacityPreemptionPolicyMockFramework { + ProportionalCapacityPreemptionPolicyMockFramework { @Before public void setup() { super.setup(); conf.setBoolean( CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true); - rc = new DominantResourceCalculator(); - when(cs.getResourceCalculator()).thenReturn(rc); + resourceCalculator = new DominantResourceCalculator(); + when(cs.getResourceCalculator()).thenReturn(resourceCalculator); policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock); } @@ -102,14 +107,88 @@ public void testSimpleIntraQueuePreemptionWithVCoreResource() // For queue B, app3 and app4 were of lower priority. Hence take 8 // containers from them by hitting the intraQueuePreemptionDemand of 20%. - verify(mDisp, times(1)).handle(argThat( + verify(eventHandler, times(1)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(3)).handle(argThat( + verify(eventHandler, times(3)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); } + @SuppressWarnings("unchecked") + @Test + public void testIntraQueuePreemptionFairOrderingWithStrictAndRelaxedDRF() + throws IOException { + /** + * Continue to allow intra-queue preemption when only one of the user's + * resources is above the user limit. + * Queue structure is: + * + *
    +     *       root
    +     *     /  |
    +     *    a   b
    +     * </pre>
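    +     * (In the scenario described below, user1's running app holds 61440:60 while
    +     * the test stubs the user limit at 30720:300, so memory is above the user
    +     * limit and vCores are below it.)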
    + * + * Guaranteed resource of a and b are 30720:300 and 30720:300 Total cluster + * resource = 61440:600. + * Scenario: Queue B has one running app using 61720:60 resources with no + * pending resources, and one app with no used resources and 30720:30 + * pending resources. + * + * The first part of the test is to show what happens when the conservative + * DRF property is set. Since the memory is above and the vcores is below + * the user limit, only the minimum number of containers is allowed. + * In the second part, since conservative DRF is relaxed, all containers + * needed are allowed to be preempted (minus the AM size). + */ + + conf.set(CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ORDER_POLICY, + "userlimit_first"); + conf.set(CapacitySchedulerConfiguration.PREFIX + + "root.b." + CapacitySchedulerConfiguration.ORDERING_POLICY, "fair"); + conf.setBoolean( + CapacitySchedulerConfiguration.IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + true); + + String labelsConfig = "=61440:600,true;"; + String nodesConfig = // n1 has no label + "n1= res=61440:600"; + String queuesConfig = + // guaranteed,max,used,pending,reserved + "root(=[61440:600 61440:600 61440:600 30720:30 0]);" + // root + "-a(=[30720:300 61440:600 0:0 0:0 0]);" + // a + "-b(=[30720:300 61440:600 61440:60 30720:30 0]);"; // b + + String appsConfig = + "b\t" + "(1,1024:1,n1,,60,false,0:0,user1);" + // app1 in b + "b\t" + "(1,0:0,n1,,0,false,30720:30,user3);"; // app2 in b + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + Resource ul = Resource.newInstance(30720, 300); + when(((LeafQueue)(cs.getQueue("root.b"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + + verify(eventHandler, times(6)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + reset(eventHandler); + + conf.setBoolean( + CapacitySchedulerConfiguration.IN_QUEUE_PREEMPTION_CONSERVATIVE_DRF, + false); + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + when(((LeafQueue)(cs.getQueue("root.b"))) + .getResourceLimitForAllUsers(any(), any(), any(), any()) + ).thenReturn(ul); + policy.editSchedule(); + verify(eventHandler, times(29)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + } + @Test public void testIntraQueuePreemptionWithDominantVCoreResource() throws IOException { @@ -165,13 +244,13 @@ public void testIntraQueuePreemptionWithDominantVCoreResource() // For queue B, app3 and app4 were of lower priority. Hence take 4 // containers. 
- verify(mDisp, times(9)).handle(argThat( + verify(eventHandler, times(9)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(4)))); - verify(mDisp, times(4)).handle(argThat( + verify(eventHandler, times(4)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(5)))); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java index 964a23085dc80..d59c8548e4a7b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java index ec71e67f591df..2e7b01ed50d9a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.log4j.Level; @@ -57,10 +58,10 @@ public void 
testPreemptionToBalanceDisabled() throws IOException { policy.editSchedule(); // I_A: A:30 B:35 C:35, preempt 5 from B and 15 from C to A - verify(mDisp, times(5)).handle(argThat( + verify(eventHandler, times(5)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(15)).handle(argThat( + verify(eventHandler, times(15)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); @@ -99,10 +100,10 @@ public void testPreemptionToBalanceEnabled() throws IOException { policy.editSchedule(); // I_A: A:33 B:33 C:33, preempt 7 from B and 17 from C to A - verify(mDisp, times(7)).handle(argThat( + verify(eventHandler, times(7)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(2)))); - verify(mDisp, times(17)).handle(argThat( + verify(eventHandler, times(17)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); @@ -142,7 +143,7 @@ public void testPreemptionToBalanceUsedPlusPendingLessThanGuaranteed() policy.editSchedule(); // I_A: A:15 B:42 C:43, preempt 7 from B and 17 from C to A - verify(mDisp, times(8)).handle(argThat( + verify(eventHandler, times(8)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(3)))); @@ -178,7 +179,7 @@ public void testPreemptionToBalanceWithVcoreResource() throws IOException { policy.editSchedule(); // 21 containers will be preempted here - verify(mDisp, times(21)).handle(argThat( + verify(eventHandler, times(21)).handle(argThat( new TestProportionalCapacityPreemptionPolicy. IsPreemptionRequestFor(getAppAttemptId(2)))); @@ -240,7 +241,7 @@ public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException { assertEquals(hasFifoSelector, true); // 21 containers will be preempted here - verify(mDisp, times(21)).handle(argThat( + verify(eventHandler, times(21)).handle(argThat( new TestProportionalCapacityPreemptionPolicy. IsPreemptionRequestFor(getAppAttemptId(2)))); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ContainerSpecification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ContainerSpecification.java new file mode 100644 index 0000000000000..eb24c1a65abab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ContainerSpecification.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.util.resource.Resources; + +import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework.parseResourceFromString; + +public class ContainerSpecification { + Priority priority; + Resource resource = Resource.newInstance(0, 0); + Resource pendingResource = Resource.newInstance(0, 0); + NodeId nodeId; + String label; + int repeat; + boolean reserved; + String username; + + private ContainerSpecification(Builder builder) { + if (builder.resource != null) { + Resources.addTo(resource, builder.resource); + } + if (builder.pendingResource != null) { + Resources.addTo(pendingResource, builder.pendingResource); + } + this.priority = builder.priority; + this.nodeId = builder.nodeId; + this.label = builder.label; + this.repeat = builder.repeat; + this.reserved = builder.reserved; + this.username = builder.username; + } + + static class Builder { + private Priority priority; + private Resource resource; + private NodeId nodeId; + private String label; + private int repeat; + private boolean reserved; + private Resource pendingResource; + private String username = "user"; + + public static Builder create() { + return new Builder(); + } + + Builder withPriority(String value) { + this.priority = Priority.newInstance(Integer.valueOf(value)); + return this; + } + + Builder withResource(String value) { + this.resource = parseResourceFromString(value); + return this; + } + + Builder withHostname(String value) { + this.nodeId = NodeId.newInstance(value, 1); + return this; + } + + Builder withLabel(String value) { + this.label = value; + return this; + } + + Builder withRepeat(String repeat) { + this.repeat = Integer.valueOf(repeat); + return this; + } + + Builder withReserved(String value) { + this.reserved = Boolean.valueOf(value); + return this; + } + + Builder withPendingResource(String value) { + this.pendingResource = parseResourceFromString(value); + return this; + } + + Builder withUsername(String value) { + this.username = value; + return this; + } + + public ContainerSpecification build() { + return new ContainerSpecification(this); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockApplication.java new file mode 100644 index 0000000000000..f40b4a05ff0a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockApplication.java @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class MockApplication { + private static final Logger LOG = LoggerFactory.getLogger(MockApplication.class); + private List liveContainers = new ArrayList<>(); + private List reservedContainers = new ArrayList<>(); + + private ApplicationId appId; + final String containersConfig; + final String queueName; + ApplicationAttemptId appAttemptId; + FiCaSchedulerApp app; + + MockApplication(int id, String containersConfig, String queueName) { + this.appId = ApplicationId.newInstance(0L, id); + this.containersConfig = containersConfig; + this.queueName = queueName; + + //dynamic fields + this.appAttemptId = ApplicationAttemptId + .newInstance(appId, 1); + //this must be the last step + setupInitialMocking(queueName); + } + + private void setupInitialMocking(String queueName) { + this.app = mock(FiCaSchedulerApp.class); + when(app.getAMResource(anyString())) + .thenReturn(Resources.createResource(0, 0)); + when(app.getLiveContainers()).thenReturn(liveContainers); + when(app.getReservedContainers()).thenReturn(reservedContainers); + when(app.getApplicationAttemptId()).thenReturn(appAttemptId); + when(app.getApplicationId()).thenReturn(appId); + when(app.getQueueName()).thenReturn(queueName); + } + + private void addLiveContainer(RMContainer c) { + this.liveContainers.add(c); + } + + private void addReservedContainer(RMContainer c) { + this.reservedContainers.add(c); + } + + void addMockContainer(MockContainer mockContainer, + FiCaSchedulerNode schedulerNode, LeafQueue queue) { + int containerId = mockContainer.containerId; + ContainerSpecification containerSpec = mockContainer.containerSpec; + + if (containerId == 1) { + when(app.getAMResource(containerSpec.label)).thenReturn(containerSpec.resource); + 
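+      // Container id 1 is treated as the application's AM container, so its
+      // resource and node label are also reported as the app's AM resource and
+      // AM node partition.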
when(app.getAppAMNodePartitionName()).thenReturn(containerSpec.label); + } + + if (containerSpec.reserved) { + addReservedContainer(mockContainer.rmContainerMock); + } else { + addLiveContainer(mockContainer.rmContainerMock); + } + + // Add container to scheduler-node + addContainerToSchedulerNode(schedulerNode, mockContainer.rmContainerMock, containerSpec.reserved); + + // If this is a non-exclusive allocation + String partition = null; + if (containerSpec.label.isEmpty() + && !(partition = schedulerNode.getPartition()) + .isEmpty()) { + Map> ignoreExclusivityContainers = queue + .getIgnoreExclusivityRMContainers(); + if (!ignoreExclusivityContainers.containsKey(partition)) { + ignoreExclusivityContainers.put(partition, new TreeSet<>()); + } + ignoreExclusivityContainers.get(partition).add(mockContainer.rmContainerMock); + LOG.info("Added an ignore-exclusivity container to partition {}, new size is: {}", partition, ignoreExclusivityContainers.get(partition).size()); + + } + LOG.debug("add container to app=" + appAttemptId + " res=" + containerSpec.resource + " node=" + + containerSpec.nodeId + " nodeLabelExpression=" + containerSpec.label + " partition=" + + partition); + } + + void addAggregatedContainerData(ContainerSpecification containerSpec, + Resource usedResources) { + // If app has 0 container, and it has only pending, still make sure to + // update label. + if (containerSpec.repeat == 0) { + when(app.getAppAMNodePartitionName()).thenReturn(containerSpec.label); + } + + // Some more app specific aggregated data can be better filled here. + when(app.getPriority()).thenReturn(containerSpec.priority); + when(app.getUser()).thenReturn(containerSpec.username); + when(app.getCurrentConsumption()).thenReturn(usedResources); + when(app.getCurrentReservation()) + .thenReturn(Resources.createResource(0, 0)); + + Map pendingForDefaultPartition = + new HashMap<>(); + // Add for default partition for now. 
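+      // (keyed by the container's label; an empty label denotes the default partition)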
+ pendingForDefaultPartition.put(containerSpec.label, containerSpec.pendingResource); + when(app.getTotalPendingRequestsPerPartition()) + .thenReturn(pendingForDefaultPartition); + + // need to set pending resource in resource usage as well + ResourceUsage ru = Mockito.spy(new ResourceUsage()); + ru.setUsed(containerSpec.label, usedResources); + when(ru.getCachedUsed(anyString())).thenReturn(usedResources); + when(app.getAppAttemptResourceUsage()).thenReturn(ru); + when(app.getSchedulingResourceUsage()).thenReturn(ru); + } + + private void addContainerToSchedulerNode(SchedulerNode node, RMContainer container, + boolean isReserved) { + assert node != null; + + if (isReserved) { + when(node.getReservedContainer()).thenReturn(container); + } else { + node.getCopiedListOfRunningContainers().add(container); + Resources.subtractFrom(node.getUnallocatedResource(), + container.getAllocatedResource()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockApplications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockApplications.java new file mode 100644 index 0000000000000..b16861257ffcd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockApplications.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; + +class MockApplications { + private static final Logger LOG = LoggerFactory.getLogger( + MockApplications.class); + + private String config; + private ResourceCalculator resourceCalculator; + private Map nameToCSQueues; + private Map partitionToResource; + private Map nodeIdToSchedulerNodes; + private Map> userMap = new HashMap<>(); + private Map>> userResourceUsagePerLabel = new HashMap<>(); + private int id = 1; + + MockApplications(String appsConfig, + ResourceCalculator resourceCalculator, + Map nameToCSQueues, + Map partitionToResource, + Map nodeIdToSchedulerNodes) { + this.config = appsConfig; + this.resourceCalculator = resourceCalculator; + this.nameToCSQueues = nameToCSQueues; + this.partitionToResource = partitionToResource; + this.nodeIdToSchedulerNodes = nodeIdToSchedulerNodes; + init(); + } + + /** + * Format is: + *
    +   * queueName\t  // app1
    +   * (priority,resource,host,expression,#repeat,reserved)
    +   * (priority,resource,host,expression,#repeat,reserved);
    +   * queueName\t  // app2
    +   * 
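+   * For illustration only, a two-app config in this format might look like
+   * (assuming queues a/b and hosts n1/n2 exist in the queue and node configs):
+   * a\t(1,1:1,n1,,20,false);
+   * b\t(1,1:1,n2,red,10,false)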
    + */ + private void init() { + int mulp = -1; + for (String appConfig : config.split(";")) { + String[] appConfigComponents = appConfig.split("\t"); + String queueName = appConfigComponents[0]; + if (mulp <= 0 && appConfigComponents.length > 2 && appConfigComponents[2] != null) { + LOG.info("Mulp value: " + appConfigComponents[2]); + mulp = 100 / (Integer.parseInt(appConfigComponents[2])); + } + + String containersConfig = appConfigComponents[1]; + MockApplication mockApp = new MockApplication(id, containersConfig, queueName); + new MockContainers(mockApp, nameToCSQueues, nodeIdToSchedulerNodes); + add(mockApp); + id++; + } + setupUserResourceUsagePerLabel(resourceCalculator, mulp); + } + + private void add(MockApplication mockApp) { + // add to LeafQueue + LeafQueue queue = (LeafQueue) nameToCSQueues.get(mockApp.queueName); + queue.getApplications().add(mockApp.app); + queue.getAllApplications().add(mockApp.app); + when(queue.getMinimumAllocation()).thenReturn(Resource.newInstance(1,1)); + when(mockApp.app.getCSLeafQueue()).thenReturn(queue); + + LOG.debug("Application mock: queue: " + mockApp.queueName + ", appId:" + mockApp.app); + + Set users = userMap.computeIfAbsent(mockApp.queueName, k -> new HashSet<>()); + users.add(mockApp.app.getUser()); + + String label = mockApp.app.getAppAMNodePartitionName(); + + // Get label to queue + Map> userResourceUsagePerQueue = + userResourceUsagePerLabel.computeIfAbsent(label, k -> new HashMap<>()); + + // Get queue to user based resource map + Map userResourceUsage = + userResourceUsagePerQueue.computeIfAbsent(mockApp.queueName, k -> new HashMap<>()); + + // Get user to its resource usage. + ResourceUsage usage = userResourceUsage.get(mockApp.app.getUser()); + if (null == usage) { + usage = new ResourceUsage(); + userResourceUsage.put(mockApp.app.getUser(), usage); + } + + usage.incAMUsed(mockApp.app.getAMResource(label)); + usage.incUsed(mockApp.app.getAppAttemptResourceUsage().getUsed(label)); + } + + private void setupUserResourceUsagePerLabel(ResourceCalculator resourceCalculator, + int mulp) { + for (String label : userResourceUsagePerLabel.keySet()) { + for (String queueName : userMap.keySet()) { + LeafQueue queue = (LeafQueue) nameToCSQueues.get(queueName); + // Currently we have user-limit test support only for default label. 
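+        // hence the capacity below is always computed against the default ("") partition's total resource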
+ Resource toResourcePartition = partitionToResource.get(""); + Resource capacity = Resources.multiply(toResourcePartition, + queue.getQueueCapacities().getAbsoluteCapacity()); + Set users = userMap.get(queue.getQueueName()); + //TODO: Refactor this test class to use queue path internally like + // CS does from now on + if (users == null) { + users = userMap.get(queue.getQueuePath()); + } + when(queue.getAllUsers()).thenReturn(users); + Resource userLimit = calculateUserLimit(resourceCalculator, mulp, capacity, + users); + LOG.debug("Updating user-limit from mock: toResourcePartition=" + + toResourcePartition + ", capacity=" + capacity + + ", users.size()=" + users.size() + ", userLimit= " + userLimit + + ",label= " + label + ",queueName= " + queueName); + + setupUserToQueueSettings(label, queueName, queue, users, userLimit); + } + } + } + + private void setupUserToQueueSettings(String label, String queueName, + LeafQueue queue, Set users, Resource userLimit) { + Map userResourceUsage = + userResourceUsagePerLabel.get(label).get(queueName); + for (String userName : users) { + User user = new User(userName); + if (userResourceUsage != null) { + user.setResourceUsage(userResourceUsage.get(userName)); + } + when(queue.getUser(eq(userName))).thenReturn(user); + when(queue.getResourceLimitForAllUsers(eq(userName), + any(Resource.class), anyString(), any(SchedulingMode.class))) + .thenReturn(userLimit); + } + } + + private Resource calculateUserLimit(ResourceCalculator resourceCalculator, + int mulp, Resource capacity, Set users) { + if (mulp > 0) { + return Resources.divideAndCeil(resourceCalculator, capacity, mulp); + } else { + return Resources.divideAndCeil(resourceCalculator, capacity, users.size()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockContainer.java new file mode 100644 index 0000000000000..95f7391e96e47 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockContainer.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl; +import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class MockContainer { + ContainerSpecification containerSpec; + public int containerId; + private MockApplication mockApp; + RMContainerImpl rmContainerMock; + + MockContainer(ContainerSpecification containerSpec, + int containerId, MockApplication mockApp) { + this.containerSpec = containerSpec; + this.containerId = containerId; + this.mockApp = mockApp; + this.rmContainerMock = mock(RMContainerImpl.class); + init(); + } + + private void init() { + Container c = mock(Container.class); + when(c.getResource()).thenReturn(containerSpec.resource); + when(c.getPriority()).thenReturn(containerSpec.priority); + SchedulerRequestKey sk = SchedulerRequestKey.extractFrom(c); + when(rmContainerMock.getAllocatedSchedulerKey()).thenReturn(sk); + when(rmContainerMock.getAllocatedNode()).thenReturn(containerSpec.nodeId); + when(rmContainerMock.getNodeLabelExpression()).thenReturn(containerSpec.label); + when(rmContainerMock.getAllocatedResource()).thenReturn(containerSpec.resource); + when(rmContainerMock.getContainer()).thenReturn(c); + when(rmContainerMock.getApplicationAttemptId()).thenReturn(mockApp.appAttemptId); + when(rmContainerMock.getQueueName()).thenReturn(mockApp.queueName); + final ContainerId cId = ContainerId.newContainerId(mockApp.appAttemptId, + containerId); + when(rmContainerMock.getContainerId()).thenReturn(cId); + doAnswer(new Answer() { + @Override + public Integer answer(InvocationOnMock invocation) { + return cId.compareTo( + ((RMContainer) invocation.getArguments()[0]).getContainerId()); + } + }).when(rmContainerMock).compareTo(any(RMContainer.class)); + + if (containerId == 1) { + when(rmContainerMock.isAMContainer()).thenReturn(true); + } + + if (containerSpec.reserved) { + when(rmContainerMock.getReservedResource()).thenReturn(containerSpec.resource); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockContainers.java new file mode 100644 index 0000000000000..5a4212d9a098a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockContainers.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.util.resource.Resources; + +import java.util.Map; + +class MockContainers { + private MockApplication mockApp; + private Map nameToCSQueues; + private Map nodeIdToSchedulerNodes; + + MockContainers(MockApplication mockApp, + Map nameToCSQueues, + Map nodeIdToSchedulerNodes) { + this.mockApp = mockApp; + this.nameToCSQueues = nameToCSQueues; + this.nodeIdToSchedulerNodes = nodeIdToSchedulerNodes; + init(); + } + + private void init() { + String containersConfig = mockApp.containersConfig; + int start = containersConfig.indexOf("=") + 1; + int end = -1; + int containerId = 1; + + while (start < containersConfig.length()) { + while (start < containersConfig.length() + && containersConfig.charAt(start) != '(') { + start++; + } + if (start >= containersConfig.length()) { + throw new IllegalArgumentException( + "Error in containers specification, line=" + containersConfig); + } + end = start + 1; + while (end < containersConfig.length() + && containersConfig.charAt(end) != ')') { + end++; + } + if (end >= containersConfig.length()) { + throw new IllegalArgumentException( + "Error in containers specification, line=" + containersConfig); + } + + // now we found start/end, get container values + String[] values = containersConfig.substring(start + 1, end).split(","); + if (values.length < 6 || values.length > 8) { + throw new IllegalArgumentException("Format to define container is:" + + "(priority,resource,host,label expression,repeat,reserved, pending)"); + } + + ContainerSpecification.Builder builder = ContainerSpecification.Builder.create() + .withPriority(values[0]) + .withResource(values[1]) + .withHostname(values[2]) + .withLabel(values[3]) + .withRepeat(values[4]) + .withReserved(values[5]); + + if (values.length >= 7) { + builder.withPendingResource(values[6]); + } + if (values.length == 8) { + builder.withUsername(values[7]); + } + ContainerSpecification containerSpec = builder.build(); + + Resource usedResources = Resource.newInstance(0, 0); + for (int i = 0; i < containerSpec.repeat; i++) { + Resources.addTo(usedResources, containerSpec.resource); + MockContainer mockContainer = new MockContainer(containerSpec, containerId, mockApp); + FiCaSchedulerNode schedulerNode = + nodeIdToSchedulerNodes.get(containerSpec.nodeId); + LeafQueue queue = (LeafQueue) nameToCSQueues.get(mockApp.queueName); + mockApp.addMockContainer(mockContainer, schedulerNode, queue); + containerId++; + } + mockApp.addAggregatedContainerData(containerSpec, 
usedResources); + start = end + 1; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockNodeLabelsManager.java new file mode 100644 index 0000000000000..2be8a783ce209 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockNodeLabelsManager.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework.parseResourceFromString; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; + +class MockNodeLabelsManager { + private static final Logger LOG = LoggerFactory.getLogger(MockNodeLabelsManager.class); + + private String config; + private final Resource clusterResource; + private final Map partitionToResource; + private final RMNodeLabelsManager nodeLabelsManager; + + MockNodeLabelsManager(String config, + RMNodeLabelsManager nodeLabelsManager, + Map partitionToResource) throws IOException { + this.config = config; + this.partitionToResource = partitionToResource; + this.clusterResource = Resources.createResource(0); + this.nodeLabelsManager = nodeLabelsManager; + this.parse(); + } + + /** + * Format is: + *
    +   * partition0=total_resource,exclusivity;
    +   * partition1=total_resource,exclusivity;
    +   * ...
    +   * 
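+   * For illustration only, "=100:200,true;red=50:100,false" defines the default
+   * ("") partition with 100 memory and 200 vcores marked exclusive, plus a
+   * non-exclusive partition named "red" with 50 memory and 100 vcores.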
    + */ + private void parse() throws IOException { + String[] partitionConfigArr = config.split(";"); + for (String p : partitionConfigArr) { + String partitionName = p.substring(0, p.indexOf("=")); + Resource res = parseResourceFromString(p.substring(p.indexOf("=") + 1, + p.indexOf(","))); + boolean exclusivity = + Boolean.valueOf(p.substring(p.indexOf(",") + 1)); + when(nodeLabelsManager.getResourceByLabel(eq(partitionName), any(Resource.class))) + .thenReturn(res); + when(nodeLabelsManager.isExclusiveNodeLabel(eq(partitionName))).thenReturn(exclusivity); + + // add to partition to resource + partitionToResource.put(partitionName, res); + LOG.debug("add partition=" + partitionName + " totalRes=" + res + + " exclusivity=" + exclusivity); + Resources.addTo(clusterResource, res); + } + + when(nodeLabelsManager.getClusterNodeLabelNames()).thenReturn( + partitionToResource.keySet()); + } + + public Resource getClusterResource() { + return clusterResource; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockQueueHierarchy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockQueueHierarchy.java new file mode 100644 index 0000000000000..ae4ff5a663ed2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockQueueHierarchy.java @@ -0,0 +1,397 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.QueueOrderingPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework.parseResourceFromString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class MockQueueHierarchy { + private static final Logger LOG = LoggerFactory.getLogger(MockQueueHierarchy.class); + private final String ROOT = CapacitySchedulerConfiguration.ROOT; + private final ParentQueue rootQueue; + private String config; + private final CapacityScheduler cs; + private CapacitySchedulerConfiguration conf; + private final ResourceCalculator resourceCalculator; + private final Map nameToCSQueues; + private final Map partitionToResource; + + MockQueueHierarchy(String config, + CapacityScheduler cs, + CapacitySchedulerConfiguration conf, + ResourceCalculator resourceCalculator, + Map partitionToResource) { + this.config = config; + this.cs = cs; + this.conf = conf; + this.resourceCalculator = resourceCalculator; + this.nameToCSQueues = new HashMap<>(); + this.partitionToResource = partitionToResource; + this.rootQueue = init(); + } + + public ParentQueue getRootQueue() { + return rootQueue; + } + + Map getNameToCSQueues() { + return nameToCSQueues; + } + + /** + * Format is: + *
+   * root (<partition-name-1>=[guaranteed max used pending (reserved)],<partition-name-2>=..);
    +   * -A(...);
    +   * --A1(...);
    +   * --A2(...);
    +   * -B...
    +   * 
+ * ";" splits queues, and there should be no empty lines, no extra spaces + * + * For each queue, it has configurations to specify capacities (to each + * partition), format is: + *
+   * -<queueName> (<labelName1>=[guaranteed max used pending], \
+   *               <labelName2>=[guaranteed max used pending])
    +   *              {key1=value1,key2=value2};  // Additional configs
    +   * 
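+   * For illustration only, a root queue with children a and b over the
+   * partitions "" and "red" might be written as:
+   * root(=[200 200 100 100],red=[100 100 80 20]);
+   * -a(=[100 200 60 60],red=[50 100 40 20]);
+   * -b(=[100 200 40 40]){priority=2,disable_preemption=true}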
    + */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + private ParentQueue init() { + String[] queueExprArray = config.split(";"); + ParentQueue rootQueue = null; + for (int idx = 0; idx < queueExprArray.length; idx++) { + String q = queueExprArray[idx]; + CSQueue queue; + + // Initialize queue + if (isParent(queueExprArray, idx)) { + ParentQueue parentQueue = mock(ParentQueue.class); + queue = parentQueue; + List children = new ArrayList<>(); + when(parentQueue.getChildQueues()).thenReturn(children); + QueueOrderingPolicy policy = mock(QueueOrderingPolicy.class); + when(policy.getConfigName()).thenReturn( + CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY); + when(parentQueue.getQueueOrderingPolicy()).thenReturn(policy); + } else { + LeafQueue leafQueue = mock(LeafQueue.class); + final TreeSet apps = new TreeSet<>( + new Comparator() { + @Override + public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) { + if (a1.getPriority() != null + && !a1.getPriority().equals(a2.getPriority())) { + return a1.getPriority().compareTo(a2.getPriority()); + } + + return a1.getApplicationId() + .compareTo(a2.getApplicationId()); + } + }); + when(leafQueue.getApplications()).thenReturn(apps); + when(leafQueue.getAllApplications()).thenReturn(apps); + OrderingPolicy so = mock(OrderingPolicy.class); + String opName = conf.get(CapacitySchedulerConfiguration.PREFIX + + CapacitySchedulerConfiguration.ROOT + "." + getQueueName(q) + + ".ordering-policy", "fifo"); + if (opName.equals("fair")) { + so = Mockito.spy(new FairOrderingPolicy<>()); + } + when(so.getPreemptionIterator()).thenAnswer(new Answer() { + public Object answer(InvocationOnMock invocation) { + return apps.descendingIterator(); + } + }); + when(leafQueue.getOrderingPolicy()).thenReturn(so); + + Map> ignorePartitionContainers = + new HashMap<>(); + when(leafQueue.getIgnoreExclusivityRMContainers()).thenReturn( + ignorePartitionContainers); + queue = leafQueue; + } + + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + when(queue.getReadLock()).thenReturn(lock.readLock()); + setupQueue(queue, q, queueExprArray, idx); + if (queue.getQueuePath().equals(ROOT)) { + rootQueue = (ParentQueue) queue; + } + } + return rootQueue; + } + + private void setupQueue(CSQueue queue, String q, String[] queueExprArray, + int idx) { + LOG.debug("*** Setup queue, source=" + q); + String queuePath = null; + + int myLevel = getLevel(q); + if (0 == myLevel) { + // It's root + when(queue.getQueuePath()).thenReturn(ROOT); + queuePath = ROOT; + } + + String queueName = getQueueName(q); + when(queue.getQueueName()).thenReturn(queueName); + + // Setup parent queue, and add myself to parentQueue.children-list + ParentQueue parentQueue = getParentQueue(queueExprArray, idx, myLevel); + if (null != parentQueue) { + when(queue.getParent()).thenReturn(parentQueue); + parentQueue.getChildQueues().add(queue); + + // Setup my path + queuePath = parentQueue.getQueuePath() + "." + queueName; + } + when(queue.getQueuePath()).thenReturn(queuePath); + + QueueCapacities qc = new QueueCapacities(0 == myLevel); + ResourceUsage ru = new ResourceUsage(); + QueueResourceQuotas qr = new QueueResourceQuotas(); + + when(queue.getQueueCapacities()).thenReturn(qc); + when(queue.getQueueResourceUsage()).thenReturn(ru); + when(queue.getQueueResourceQuotas()).thenReturn(qr); + + LOG.debug("Setup queue, short name=" + queue.getQueueName() + " path=" + + queue.getQueuePath()); + LOG.debug("Parent=" + (parentQueue == null ? 
"null" : parentQueue + .getQueuePath())); + + // Setup other fields like used resource, guaranteed resource, etc. + String capacitySettingStr = q.substring(q.indexOf("(") + 1, q.indexOf(")")); + for (String s : capacitySettingStr.split(",")) { + String partitionName = s.substring(0, s.indexOf("=")); + String[] values = s.substring(s.indexOf("[") + 1, s.indexOf("]")).split(" "); + // Add a small epsilon to capacities to avoid truncate when doing + // Resources.multiply + float epsilon = 1e-6f; + Resource toResourcePerPartition = partitionToResource.get(partitionName); + float absGuaranteed = Resources.divide(resourceCalculator, toResourcePerPartition, + parseResourceFromString(values[0].trim()), toResourcePerPartition) + + epsilon; + float absMax = Resources.divide(resourceCalculator, toResourcePerPartition, + parseResourceFromString(values[1].trim()), toResourcePerPartition) + + epsilon; + float absUsed = Resources.divide(resourceCalculator, toResourcePerPartition, + parseResourceFromString(values[2].trim()), toResourcePerPartition) + + epsilon; + float used = Resources.divide(resourceCalculator, toResourcePerPartition, + parseResourceFromString(values[2].trim()), + parseResourceFromString(values[0].trim())) + epsilon; + Resource pending = parseResourceFromString(values[3].trim()); + qc.setAbsoluteCapacity(partitionName, absGuaranteed); + qc.setAbsoluteMaximumCapacity(partitionName, absMax); + qc.setAbsoluteUsedCapacity(partitionName, absUsed); + qc.setUsedCapacity(partitionName, used); + qr.setEffectiveMaxResource(parseResourceFromString(values[1].trim())); + qr.setEffectiveMinResource(parseResourceFromString(values[0].trim())); + qr.setEffectiveMaxResource(partitionName, + parseResourceFromString(values[1].trim())); + qr.setEffectiveMinResource(partitionName, + parseResourceFromString(values[0].trim())); + when(queue.getUsedCapacity()).thenReturn(used); + when(queue.getEffectiveCapacity(partitionName)) + .thenReturn(parseResourceFromString(values[0].trim())); + when(queue.getEffectiveMaxCapacity(partitionName)) + .thenReturn(parseResourceFromString(values[1].trim())); + ru.setPending(partitionName, pending); + // Setup reserved resource if it contained by input config + Resource reserved = Resources.none(); + if(values.length == 5) { + reserved = parseResourceFromString(values[4].trim()); + ru.setReserved(partitionName, reserved); + } + if (!isParent(queueExprArray, idx)) { + LeafQueue lq = (LeafQueue) queue; + when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), + isA(String.class), eq(false))).thenReturn(pending); + when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), + isA(String.class), eq(true))).thenReturn( + Resources.subtract(pending, reserved)); + } + ru.setUsed(partitionName, parseResourceFromString(values[2].trim())); + + LOG.debug("Setup queue=" + queueName + " partition=" + partitionName + + " [abs_guaranteed=" + absGuaranteed + ",abs_max=" + absMax + + ",abs_used" + absUsed + ",pending_resource=" + pending + + ", reserved_resource=" + reserved + "]"); + } + + // Setup preemption disabled + when(queue.getPreemptionDisabled()).thenReturn( + conf.getPreemptionDisabled(queuePath, false)); + + // Setup other queue configurations + Map otherConfigs = getOtherConfigurations( + queueExprArray[idx]); + if (otherConfigs.containsKey("priority")) { + when(queue.getPriority()).thenReturn( + Priority.newInstance(Integer.valueOf(otherConfigs.get("priority")))); + } else { + // set queue's priority to 0 by default + 
when(queue.getPriority()).thenReturn(Priority.newInstance(0)); + } + + // Setup disable preemption of queues + if (otherConfigs.containsKey("disable_preemption")) { + when(queue.getPreemptionDisabled()).thenReturn( + Boolean.valueOf(otherConfigs.get("disable_preemption"))); + } + + //TODO: Refactor this test class to use queue path internally like CS + // does from now on + nameToCSQueues.put(queuePath, queue); + nameToCSQueues.put(queueName, queue); + when(cs.getQueue(eq(queuePath))).thenReturn(queue); + when(cs.getQueue(eq(queueName))).thenReturn(queue); + when(cs.normalizeQueueName(eq(queuePath))).thenReturn(queuePath); + when(cs.normalizeQueueName(eq(queueName))).thenReturn(queuePath); + } + + /** + * Get additional queue's configurations + * @param queueExpr queue expr + * @return maps of configs + */ + private Map getOtherConfigurations(String queueExpr) { + if (queueExpr.contains("{")) { + int left = queueExpr.indexOf('{'); + int right = queueExpr.indexOf('}'); + + if (right > left) { + Map configs = new HashMap<>(); + + String subStr = queueExpr.substring(left + 1, right); + for (String kv : subStr.split(",")) { + if (kv.contains("=")) { + String key = kv.substring(0, kv.indexOf("=")); + String value = kv.substring(kv.indexOf("=") + 1); + configs.put(key, value); + } + } + + return configs; + } + } + + return Collections.emptyMap(); + } + + private String getQueueName(String q) { + int idx = 0; + // find first != '-' char + while (idx < q.length() && q.charAt(idx) == '-') { + idx++; + } + if (idx == q.length()) { + throw new IllegalArgumentException("illegal input:" + q); + } + // name = after '-' and before '(' + String name = q.substring(idx, q.indexOf('(')); + if (name.isEmpty()) { + throw new IllegalArgumentException("queue name shouldn't be empty:" + q); + } + if (name.contains(".")) { + throw new IllegalArgumentException("queue name shouldn't contain '.':" + + name); + } + return name; + } + + private ParentQueue getParentQueue(String[] queueExprArray, int idx, int myLevel) { + idx--; + while (idx >= 0) { + int level = getLevel(queueExprArray[idx]); + if (level < myLevel) { + String parentQueueName = getQueueName(queueExprArray[idx]); + return (ParentQueue) nameToCSQueues.get(parentQueueName); + } + idx--; + } + + return null; + } + + /** + * Get if a queue is ParentQueue + */ + private boolean isParent(String[] queues, int idx) { + int myLevel = getLevel(queues[idx]); + idx++; + while (idx < queues.length && getLevel(queues[idx]) == myLevel) { + idx++; + } + if (idx >= queues.length || getLevel(queues[idx]) < myLevel) { + // It's a LeafQueue + return false; + } else { + return true; + } + } + + /** + * Level of a queue is how many "-" at beginning, root's level is 0 + */ + private int getLevel(String q) { + int level = 0; // level = how many "-" at beginning + while (level < q.length() && q.charAt(level) == '-') { + level++; + } + return level; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockSchedulerNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockSchedulerNodes.java new file mode 100644 index 0000000000000..cdea35ae23241 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/MockSchedulerNodes.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework.parseResourceFromString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class MockSchedulerNodes { + private static final Logger LOG = LoggerFactory.getLogger(MockSchedulerNodes.class); + private String config; + private Map nodeIdToSchedulerNodes = new HashMap<>(); + + MockSchedulerNodes(String config) { + this.config = config; + init(); + } + + /** + * Format is: + * host1=partition[ res=resource]; + * host2=partition[ res=resource]; + */ + private void init() { + String[] nodesConfigStrArray = config.split(";"); + for (String p : nodesConfigStrArray) { + String[] arr = p.split(" "); + + NodeId nodeId = NodeId.newInstance(arr[0].substring(0, arr[0].indexOf("=")), 1); + String partition = arr[0].substring(arr[0].indexOf("=") + 1); + + FiCaSchedulerNode sn = mock(FiCaSchedulerNode.class); + when(sn.getNodeID()).thenReturn(nodeId); + when(sn.getPartition()).thenReturn(partition); + + Resource totalRes = Resources.createResource(0); + if (arr.length > 1) { + String res = arr[1]; + if (res.contains("res=")) { + String resString = res.substring( + res.indexOf("res=") + "res=".length()); + totalRes = parseResourceFromString(resString); + } + } + when(sn.getTotalResource()).thenReturn(totalRes); + when(sn.getUnallocatedResource()).thenReturn(Resources.clone(totalRes)); + + // TODO, add settings of killable resources when necessary + when(sn.getTotalKillableResources()).thenReturn(Resources.none()); + + List liveContainers = new ArrayList<>(); + when(sn.getCopiedListOfRunningContainers()).thenReturn(liveContainers); + + nodeIdToSchedulerNodes.put(nodeId, sn); + + LOG.debug("add scheduler node, id=" + nodeId + ", partition=" + partition); + } + } + + Map getNodeIdToSchedulerNodes() { + return nodeIdToSchedulerNodes; + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java new file mode 100644 index 0000000000000..024ec86f7d70e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -0,0 +1,318 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; + +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.mockito.ArgumentMatcher; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.yarn.event.Event; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ProportionalCapacityPreemptionPolicyMockFramework { + private static final Logger LOG = LoggerFactory.getLogger( + TestProportionalCapacityPreemptionPolicyForNodePartitions.class); + private static final double ALLOWED_CAPACITY_DELTA = 1e-3; + + private Map nameToCSQueues; + private Map partitionToResource; + private Map nodeIdToSchedulerNodes; + private RMNodeLabelsManager nodeLabelsManager; + public RMContext rmContext; + + public ResourceCalculator resourceCalculator = new DefaultResourceCalculator(); + public Clock mClock; + public CapacitySchedulerConfiguration conf; + public CapacityScheduler cs; + @SuppressWarnings("rawtypes") + public EventHandler eventHandler; + public ProportionalCapacityPreemptionPolicy policy; + private Resource clusterResource; + // Initialize resource map + public Map riMap = new HashMap<>(); + + private void resetResourceInformationMap() { + // Initialize mandatory resources + ResourceInformation memory = ResourceInformation.newInstance( + ResourceInformation.MEMORY_MB.getName(), + ResourceInformation.MEMORY_MB.getUnits(), + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB); + ResourceInformation vcores = ResourceInformation.newInstance( + ResourceInformation.VCORES.getName(), + ResourceInformation.VCORES.getUnits(), + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + riMap.put(ResourceInformation.MEMORY_URI, memory); + riMap.put(ResourceInformation.VCORES_URI, vcores); + + ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); + } + + @SuppressWarnings("unchecked") + @Before + public void setup() { + resetResourceInformationMap(); + + org.apache.log4j.Logger.getRootLogger().setLevel( + org.apache.log4j.Level.DEBUG); + + conf = new CapacitySchedulerConfiguration(new Configuration(false)); + conf.setLong( + CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000); + conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, + 3000); + // report "ideal" preempt + conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND, + (float) 1.0); + conf.setFloat( + CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR, + (float) 1.0); + + mClock = mock(Clock.class); + cs = mock(CapacityScheduler.class); + when(cs.getResourceCalculator()).thenReturn(resourceCalculator); + when(cs.getPreemptionManager()).thenReturn(new PreemptionManager()); + when(cs.getConfiguration()).thenReturn(conf); + + nodeLabelsManager = 
mock(RMNodeLabelsManager.class); + eventHandler = mock(EventHandler.class); + + rmContext = mock(RMContext.class); + when(rmContext.getNodeLabelManager()).thenReturn(nodeLabelsManager); + Dispatcher dispatcher = mock(Dispatcher.class); + when(rmContext.getDispatcher()).thenReturn(dispatcher); + when(dispatcher.getEventHandler()).thenReturn(eventHandler); + when(cs.getRMContext()).thenReturn(rmContext); + + partitionToResource = new HashMap<>(); + nodeIdToSchedulerNodes = new HashMap<>(); + nameToCSQueues = new HashMap<>(); + clusterResource = Resource.newInstance(0, 0); + } + + @After + public void cleanup() { + resetResourceInformationMap(); + } + + public void buildEnv(String labelsConfig, String nodesConfig, + String queuesConfig, String appsConfig) throws IOException { + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, false); + } + + public void buildEnv(String labelsConfig, String nodesConfig, + String queuesConfig, String appsConfig, + boolean useDominantResourceCalculator) throws IOException { + if (useDominantResourceCalculator) { + when(cs.getResourceCalculator()).thenReturn( + new DominantResourceCalculator()); + } + + MockNodeLabelsManager mockNodeLabelsManager = + new MockNodeLabelsManager(labelsConfig, + nodeLabelsManager, partitionToResource); + clusterResource = mockNodeLabelsManager.getClusterResource(); + + MockSchedulerNodes mockSchedulerNodes = + new MockSchedulerNodes(nodesConfig); + nodeIdToSchedulerNodes = mockSchedulerNodes.getNodeIdToSchedulerNodes(); + addNodeIdDataToScheduler(); + + ParentQueue root = parseQueueConfig(queuesConfig); + + when(cs.getRootQueue()).thenReturn(root); + when(cs.getClusterResource()).thenReturn(clusterResource); + new MockApplications(appsConfig, resourceCalculator, nameToCSQueues, + partitionToResource, nodeIdToSchedulerNodes); + + policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock); + } + + private ParentQueue parseQueueConfig(String queuesConfig) { + MockQueueHierarchy mockQueueHierarchy = + new MockQueueHierarchy(queuesConfig, cs, conf, resourceCalculator, + partitionToResource); + this.nameToCSQueues = mockQueueHierarchy.getNameToCSQueues(); + return mockQueueHierarchy.getRootQueue(); + } + + private void addNodeIdDataToScheduler() { + for (NodeId nodeId : nodeIdToSchedulerNodes.keySet()) { + when(cs.getSchedulerNode(nodeId)).thenReturn( + nodeIdToSchedulerNodes.get(nodeId)); + } + List allNodes = new ArrayList<>( + nodeIdToSchedulerNodes.values()); + when(cs.getAllNodes()).thenReturn(allNodes); + } + + protected void updateQueueConfig(String queuesConfig) { + ParentQueue root = parseQueueConfig(queuesConfig); + when(cs.getRootQueue()).thenReturn(root); + } + + //TODO this can probably be replaced with some parser logic already implemented somewhere + static Resource parseResourceFromString(String resString) { + String[] resource = resString.split(":"); + Resource res; + if (resource.length == 1) { + res = Resources.createResource(Integer.valueOf(resource[0])); + } else { + res = Resources.createResource(Integer.valueOf(resource[0]), + Integer.valueOf(resource[1])); + if (resource.length > 2) { + // Using the same order of resources from ResourceUtils, set resource + // information. 
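+          // e.g. an illustrative "10:5:3" yields 10 memory, 5 vcores and 3 units
+          // of the third resource type registered with ResourceUtils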
+ ResourceInformation[] storedResourceInfo = ResourceUtils + .getResourceTypesArray(); + for (int i = 2; i < resource.length; i++) { + res.setResourceInformation(storedResourceInfo[i].getName(), + ResourceInformation.newInstance(storedResourceInfo[i].getName(), + storedResourceInfo[i].getUnits(), + Integer.valueOf(resource[i]))); + } + } + } + return res; + } + + public ApplicationAttemptId getAppAttemptId(int id) { + ApplicationId appId = ApplicationId.newInstance(0L, id); + return ApplicationAttemptId.newInstance(appId, 1); + } + + protected void checkContainerNodesInApp(FiCaSchedulerApp app, + int expectedContainersNumber, String host) { + NodeId nodeId = NodeId.newInstance(host, 1); + int num = 0; + for (RMContainer c : app.getLiveContainers()) { + if (c.getAllocatedNode().equals(nodeId)) { + num++; + } + } + for (RMContainer c : app.getReservedContainers()) { + if (c.getAllocatedNode().equals(nodeId)) { + num++; + } + } + Assert.assertEquals(expectedContainersNumber, num); + } + + public FiCaSchedulerApp getApp(String queueName, int appId) { + for (FiCaSchedulerApp app : ((LeafQueue) cs.getQueue(queueName)) + .getApplications()) { + if (app.getApplicationId().getId() == appId) { + return app; + } + } + return null; + } + + protected void checkAbsCapacities(CSQueue queue, String partition, + float guaranteed, float max, float used) { + QueueCapacities qc = queue.getQueueCapacities(); + Assert.assertEquals(guaranteed, qc.getAbsoluteCapacity(partition), + ALLOWED_CAPACITY_DELTA); + Assert.assertEquals(max, qc.getAbsoluteMaximumCapacity(partition), + ALLOWED_CAPACITY_DELTA); + Assert.assertEquals(used, qc.getAbsoluteUsedCapacity(partition), + ALLOWED_CAPACITY_DELTA); + } + + protected void checkPendingResource(CSQueue queue, String partition, + int pending) { + ResourceUsage ru = queue.getQueueResourceUsage(); + Assert.assertEquals(pending, ru.getPending(partition).getMemorySize()); + } + + protected void checkPriority(CSQueue queue, int expectedPriority) { + Assert.assertEquals(expectedPriority, queue.getPriority().getPriority()); + } + + protected void checkReservedResource(CSQueue queue, String partition, + int reserved) { + ResourceUsage ru = queue.getQueueResourceUsage(); + Assert.assertEquals(reserved, ru.getReserved(partition).getMemorySize()); + } + + public static class IsPreemptionRequestForQueueAndNode + implements ArgumentMatcher { + private final ApplicationAttemptId appAttId; + private final String queueName; + private final NodeId nodeId; + + public IsPreemptionRequestForQueueAndNode(ApplicationAttemptId appAttId, + String queueName, NodeId nodeId) { + this.appAttId = appAttId; + this.queueName = queueName; + this.nodeId = nodeId; + } + @Override + public boolean matches(ContainerPreemptEvent cpe) { + return appAttId.equals(cpe.getAppId()) + && queueName.equals(cpe.getContainer().getQueueName()) + && nodeId.equals(cpe.getContainer().getAllocatedNode()); + } + @Override + public String toString() { + return appAttId.toString(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestFileSystemNodeAttributeStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestFileSystemNodeAttributeStore.java index a05cf3eb5cc5e..840ff2aec284d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestFileSystemNodeAttributeStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestFileSystemNodeAttributeStore.java @@ -23,7 +23,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeAttributeType; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java index b1816774241cd..e10f1d0189088 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestNodeAttributesManager.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.nodelabels; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeAttribute; import org.apache.hadoop.yarn.api.records.NodeAttributeType; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMDelegatedNodeLabelsUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMDelegatedNodeLabelsUpdater.java index 928124d356c11..993b05e24e0fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMDelegatedNodeLabelsUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMDelegatedNodeLabelsUpdater.java @@ -41,8 +41,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; public class TestRMDelegatedNodeLabelsUpdater extends NodeLabelTestBase { private YarnConfiguration conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java index 5c9b073defac2..0400f7dfcbf0e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java @@ -59,8 +59,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestRMNodeLabelsManager extends NodeLabelTestBase { private final Resource EMPTY_RESOURCE = Resource.newInstance(0, 0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java index f7c7b501b2b2f..29141aedf6c48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestAppNameMappingPlacementRule.java @@ -37,7 +37,15 @@ import static org.mockito.Mockito.when; public class TestAppNameMappingPlacementRule { + private static final String ROOT_QUEUE = "root"; + private static final String Q2_QUEUE = "q2"; + private static final String Q1_QUEUE = "q1"; + private static final String USER_NAME = "user"; + private static final String DEFAULT_QUEUE = "default"; + private static final String APPLICATION_PLACEHOLDER = "%application"; + private static final String AMBIGUOUS_QUEUE = "ambiguousQueue"; private static final String APP_NAME = "DistributedShell"; + private static final String MAPREDUCE_APP_NAME = "MAPREDUCE"; private YarnConfiguration conf = new YarnConfiguration(); @@ -47,13 +55,13 @@ public void setup() { SimpleGroupsMapping.class, GroupMappingServiceProvider.class); } - private void verifyQueueMapping(QueueMappingEntity queueMapping, + private void verifyQueueMapping(QueueMapping queueMapping, String user, String expectedQueue) throws YarnException { verifyQueueMapping(queueMapping, user, queueMapping.getQueue(), expectedQueue, false); } - private void verifyQueueMapping(QueueMappingEntity queueMapping, + private void verifyQueueMapping(QueueMapping queueMapping, String user, String inputQueue, String expectedQueue, boolean overwrite) throws YarnException { AppNameMappingPlacementRule rule = new AppNameMappingPlacementRule( @@ -62,16 +70,20 @@ private void verifyQueueMapping(QueueMappingEntity queueMapping, CapacitySchedulerQueueManager qm = mock(CapacitySchedulerQueueManager.class); when(qm.isAmbiguous(Mockito.isA(String.class))).thenReturn(false); + when(qm.isAmbiguous(AMBIGUOUS_QUEUE)).thenReturn(true); + 
rule.queueManager = qm; ApplicationSubmissionContext asc = Records.newRecord( ApplicationSubmissionContext.class); - if (inputQueue.equals("%application")) { + if (inputQueue.equals(APPLICATION_PLACEHOLDER)) { inputQueue = APP_NAME; } asc.setQueue(inputQueue); String appName = queueMapping.getSource(); - if (appName.equals("%application")) { + // to create a scenario when source != appName + if (appName.equals(APPLICATION_PLACEHOLDER) + || appName.equals(MAPREDUCE_APP_NAME)) { appName = APP_NAME; } asc.setApplicationName(appName); @@ -81,23 +93,85 @@ private void verifyQueueMapping(QueueMappingEntity queueMapping, ctx != null ? ctx.getQueue() : inputQueue); } + public QueueMapping getQueueMapping(String source, String queue) { + return getQueueMapping(source, null, queue); + } + + public QueueMapping getQueueMapping(String source, String parent, + String queue) { + return QueueMapping.QueueMappingBuilder.create() + .type(QueueMapping.MappingType.APPLICATION) + .source(source) + .queue(queue) + .parentQueue(parent) + .build(); + } + + @Test + public void testSpecificAppNameMappedToDefinedQueue() throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, Q1_QUEUE), + USER_NAME, Q1_QUEUE); + } + + @Test + public void testPlaceholderAppSourceMappedToQueue() throws YarnException { + verifyQueueMapping(getQueueMapping(APPLICATION_PLACEHOLDER, Q2_QUEUE), + USER_NAME, Q2_QUEUE); + } + + @Test + public void testPlaceHolderAppSourceAndQueueMappedToAppNameQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APPLICATION_PLACEHOLDER, + APPLICATION_PLACEHOLDER), USER_NAME, APP_NAME); + } + + @Test + public void testQueueInMappingOverridesSpecifiedQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + Q1_QUEUE), USER_NAME, Q2_QUEUE, Q1_QUEUE, true); + } + + @Test + public void testQueueInMappingDoesNotOverrideSpecifiedQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + Q1_QUEUE), USER_NAME, Q2_QUEUE, Q2_QUEUE, false); + } + + @Test + public void testDefaultQueueInMappingIsNotUsedWithoutOverride() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + DEFAULT_QUEUE), USER_NAME, Q2_QUEUE, Q2_QUEUE, false); + } + + @Test + public void testDefaultQueueInMappingEqualsToInputQueue() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, + DEFAULT_QUEUE), USER_NAME, DEFAULT_QUEUE, DEFAULT_QUEUE, false); + } + + @Test + public void testMappingSourceDiffersFromInputQueue() throws YarnException { + verifyQueueMapping(getQueueMapping(MAPREDUCE_APP_NAME, + Q1_QUEUE), USER_NAME, DEFAULT_QUEUE, DEFAULT_QUEUE, false); + } + + @Test(expected = YarnException.class) + public void testMappingContainsAmbiguousLeafQueueWithoutParent() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, AMBIGUOUS_QUEUE), + USER_NAME, DEFAULT_QUEUE, DEFAULT_QUEUE, false); + } + @Test - public void testMapping() throws YarnException { - // simple base case for mapping user to queue - verifyQueueMapping(new QueueMappingEntity(APP_NAME, - "q1"), "user_1", "q1"); - verifyQueueMapping(new QueueMappingEntity("%application", "q2"), "user_1", - "q2"); - verifyQueueMapping(new QueueMappingEntity("%application", "%application"), - "user_1", APP_NAME); - - // specify overwritten, and see if user specified a queue, and it will be - // overridden - verifyQueueMapping(new QueueMappingEntity(APP_NAME, - "q1"), "1", "q2", "q1", true); - - // if overwritten not specified, it should be which user specified - 
verifyQueueMapping(new QueueMappingEntity(APP_NAME, - "q1"), "1", "q2", "q2", false); + public void testMappingContainsAmbiguousLeafQueueWithParent() + throws YarnException { + verifyQueueMapping(getQueueMapping(APP_NAME, ROOT_QUEUE, AMBIGUOUS_QUEUE), + USER_NAME, DEFAULT_QUEUE, AMBIGUOUS_QUEUE, false); } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java index 3b85fdad08d3d..22a9125576f1b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestPlacementManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.placement; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; @@ -28,7 +27,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.util.Records; +import org.junit.After; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import java.util.ArrayList; @@ -49,20 +50,22 @@ public class TestPlacementManager { public static final String PARENT_QUEUE = "c"; private MockRM mockRM = null; - - private static final long CLUSTER_TIMESTAMP = System.currentTimeMillis(); + private CapacitySchedulerConfiguration conf; private String getQueueMapping(String parentQueue, String leafQueue) { return parentQueue + DOT + leafQueue; } - @Test - public void testPlaceApplicationWithPlacementRuleChain() throws Exception { - CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + @Before + public void setup() { + conf = new CapacitySchedulerConfiguration(); setupQueueConfiguration(conf); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class); + } + @Test + public void testPlaceApplicationWithPlacementRuleChain() throws Exception { mockRM = new MockRM(conf); CapacityScheduler cs = (CapacityScheduler) mockRM.getResourceScheduler(); mockRM.start(); @@ -92,8 +95,12 @@ public void testPlaceApplicationWithPlacementRuleChain() throws Exception { Assert.assertNull("Placement should be null", pm.placeApplication(asc, USER2)); - QueueMappingEntity queueMappingEntity = new QueueMappingEntity(APP_NAME, - USER1, PARENT_QUEUE); + QueueMapping queueMappingEntity = QueueMapping.QueueMappingBuilder.create() + .type(MappingType.APPLICATION) + .source(APP_NAME) + .queue(USER1) + .parentQueue(PARENT_QUEUE) + .build(); AppNameMappingPlacementRule anRule = new AppNameMappingPlacementRule(false, Arrays.asList(queueMappingEntity)); @@ -108,4 +115,38 @@ public void testPlaceApplicationWithPlacementRuleChain() throws Exception { } } + @Test + public void testPlacementRuleUpdationOrder() throws Exception { + List 
queueMappings = new ArrayList<>(); + QueueMapping userQueueMapping = QueueMappingBuilder.create() + .type(MappingType.USER).source(USER1) + .queue(getQueueMapping(PARENT_QUEUE, USER1)).build(); + UserGroupMappingPlacementRule ugRule = new UserGroupMappingPlacementRule( + false, Arrays.asList(userQueueMapping), null); + + // Configure placement rule + conf.set(YarnConfiguration.QUEUE_PLACEMENT_RULES, ugRule.getName()); + queueMappings.add(userQueueMapping); + conf.setQueueMappings(queueMappings); + + mockRM = new MockRM(conf); + CapacityScheduler cs = (CapacityScheduler) mockRM.getResourceScheduler(); + mockRM.start(); + PlacementManager pm = cs.getRMContext().getQueuePlacementManager(); + + // As we are setting a placement rule, it shouldn't update the default + // placement rule, i.e. user-group. The number of placement rules should be 1. + Assert.assertEquals(1, pm.getPlacementRules().size()); + // Verify that the placement rule set is the same as the one we configured + Assert.assertEquals(ugRule.getName(), + pm.getPlacementRules().get(0).getName()); + } + + @After + public void tearDown() { + if (null != mockRM) { + mockRM.stop(); + } + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java index 95a0a8068ff26..a676d639c9d4e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java @@ -21,9 +21,14 @@ import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.isNull; import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.security.Groups; @@ -34,6 +39,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.MappingType; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; import org.apache.hadoop.yarn.server.resourcemanager.placement.TestUserGroupMappingPlacementRule.QueueMappingTestData.QueueMappingTestDataBuilder; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractCSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ManagedParentQueue; @@ -44,8 +50,148 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestUserGroupMappingPlacementRule { + private static final Logger LOG =
LoggerFactory.getLogger(TestUserGroupMappingPlacementRule.class); + + private static class MockQueueHierarchyBuilder { + private static final String ROOT = "root"; + private static final String QUEUE_SEP = "."; + private List queuePaths = Lists.newArrayList(); + private List managedParentQueues = Lists.newArrayList(); + private CapacitySchedulerQueueManager queueManager; + + public static MockQueueHierarchyBuilder create() { + return new MockQueueHierarchyBuilder(); + } + + public MockQueueHierarchyBuilder withQueueManager( + CapacitySchedulerQueueManager queueManager) { + this.queueManager = queueManager; + return this; + } + + public MockQueueHierarchyBuilder withQueue(String queue) { + this.queuePaths.add(queue); + return this; + } + + public MockQueueHierarchyBuilder withManagedParentQueue( + String managedQueue) { + this.managedParentQueues.add(managedQueue); + return this; + } + + public void build() { + if (this.queueManager == null) { + throw new IllegalStateException( + "QueueManager instance is not provided!"); + } + + for (String managedParentQueue : managedParentQueues) { + if (!queuePaths.contains(managedParentQueue)) { + queuePaths.add(managedParentQueue); + } else { + throw new IllegalStateException("Cannot add a managed parent " + + "and a simple queue with the same path"); + } + } + + Map queues = Maps.newHashMap(); + for (String queuePath : queuePaths) { + LOG.info("Processing queue path: " + queuePath); + addQueues(queues, queuePath); + } + } + + private void addQueues(Map queues, + String queuePath) { + final String[] pathComponents = queuePath.split("\\" + QUEUE_SEP); + + String currentQueuePath = ""; + for (int i = 0; i < pathComponents.length; ++i) { + boolean isLeaf = i == pathComponents.length - 1; + String queueName = pathComponents[i]; + String parentPath = currentQueuePath; + currentQueuePath += currentQueuePath.equals("") ? 
+ queueName : QUEUE_SEP + queueName; + + if (managedParentQueues.contains(parentPath) && !isLeaf) { + throw new IllegalStateException("Cannot add a queue under " + + "managed parent"); + } + if (!queues.containsKey(currentQueuePath)) { + ParentQueue parentQueue = (ParentQueue) queues.get(parentPath); + AbstractCSQueue queue = createQueue(parentQueue, queueName, + currentQueuePath, isLeaf); + queues.put(currentQueuePath, queue); + } + } + } + + private AbstractCSQueue createQueue(ParentQueue parentQueue, + String queueName, String currentQueuePath, boolean isLeaf) { + if (queueName.equals(ROOT)) { + return createRootQueue(ROOT); + } else if (managedParentQueues.contains(currentQueuePath)) { + return addManagedParentQueueAsChildOf(parentQueue, queueName); + } else if (isLeaf) { + return addLeafQueueAsChildOf(parentQueue, queueName); + } else { + return addParentQueueAsChildOf(parentQueue, queueName); + } + } + + private AbstractCSQueue createRootQueue(String rootQueueName) { + ParentQueue root = mock(ParentQueue.class); + when(root.getQueuePath()).thenReturn(rootQueueName); + when(queueManager.getQueue(rootQueueName)).thenReturn(root); + when(queueManager.getQueueByFullName(rootQueueName)).thenReturn(root); + return root; + } + + private AbstractCSQueue addParentQueueAsChildOf(ParentQueue parent, + String queueName) { + ParentQueue queue = mock(ParentQueue.class); + setQueueFields(parent, queue, queueName); + return queue; + } + + private AbstractCSQueue addManagedParentQueueAsChildOf(ParentQueue parent, + String queueName) { + ManagedParentQueue queue = mock(ManagedParentQueue.class); + setQueueFields(parent, queue, queueName); + return queue; + } + + private AbstractCSQueue addLeafQueueAsChildOf(ParentQueue parent, + String queueName) { + LeafQueue queue = mock(LeafQueue.class); + setQueueFields(parent, queue, queueName); + return queue; + } + + private void setQueueFields(ParentQueue parent, AbstractCSQueue newQueue, + String queueName) { + String fullPathOfQueue = parent.getQueuePath() + QUEUE_SEP + queueName; + addQueueToQueueManager(queueName, newQueue, fullPathOfQueue); + + when(newQueue.getParent()).thenReturn(parent); + when(newQueue.getQueuePath()).thenReturn(fullPathOfQueue); + when(newQueue.getQueueName()).thenReturn(queueName); + } + + private void addQueueToQueueManager(String queueName, AbstractCSQueue queue, + String fullPathOfQueue) { + when(queueManager.getQueue(queueName)).thenReturn(queue); + when(queueManager.getQueue(fullPathOfQueue)).thenReturn(queue); + when(queueManager.getQueueByFullName(fullPathOfQueue)).thenReturn(queue); + } + } + YarnConfiguration conf = new YarnConfiguration(); @Before @@ -70,48 +216,18 @@ private void verifyQueueMapping(QueueMappingTestData queueMappingTestData) CapacitySchedulerQueueManager queueManager = mock(CapacitySchedulerQueueManager.class); - ParentQueue agroup = mock(ParentQueue.class); - when(agroup.getQueuePath()).thenReturn("root.agroup"); - ParentQueue bsubgroup2 = mock(ParentQueue.class); - when(bsubgroup2.getQueuePath()).thenReturn("root.bsubgroup2"); - - ManagedParentQueue managedParent = mock(ManagedParentQueue.class); - when(managedParent.getQueueName()).thenReturn("root.managedParent"); - - LeafQueue a = mock(LeafQueue.class); - when(a.getQueuePath()).thenReturn("root.agroup.a"); - when(a.getParent()).thenReturn(agroup); - LeafQueue b = mock(LeafQueue.class); - when(b.getQueuePath()).thenReturn("root.bsubgroup2.b"); - when(b.getParent()).thenReturn(bsubgroup2); - LeafQueue asubgroup2 = mock(LeafQueue.class); - 
when(asubgroup2.getQueuePath()).thenReturn("root.asubgroup2"); - - when(queueManager.getQueue("a")).thenReturn(a); - when(queueManager.getQueue("b")).thenReturn(b); - when(queueManager.getQueue("agroup")).thenReturn(agroup); - when(queueManager.getQueue("bsubgroup2")).thenReturn(bsubgroup2); - when(queueManager.getQueue("asubgroup2")).thenReturn(asubgroup2); - when(queueManager.getQueue("managedParent")).thenReturn(managedParent); - - when(queueManager.getQueue("root.agroup")).thenReturn(agroup); - when(queueManager.getQueue("root.bsubgroup2")).thenReturn(bsubgroup2); - when(queueManager.getQueue("root.asubgroup2")).thenReturn(asubgroup2); - when(queueManager.getQueue("root.agroup.a")).thenReturn(a); - when(queueManager.getQueue("root.bsubgroup2.b")).thenReturn(b); - when(queueManager.getQueue("root.managedParent")).thenReturn(managedParent); - - when(queueManager.getQueueByFullName("root.agroup")).thenReturn(agroup); - when(queueManager.getQueueByFullName("root.bsubgroup2")) - .thenReturn(bsubgroup2); - when(queueManager.getQueueByFullName("root.asubgroup2")) - .thenReturn(asubgroup2); - when(queueManager.getQueueByFullName("root.agroup.a")).thenReturn(a); - when(queueManager.getQueueByFullName("root.bsubgroup2.b")).thenReturn(b); - when(queueManager.getQueueByFullName("root.managedParent")) - .thenReturn(managedParent); - - + MockQueueHierarchyBuilder.create() + .withQueueManager(queueManager) + .withQueue("root.agroup.a") + .withQueue("root.asubgroup2") + .withQueue("root.bsubgroup2.b") + .withQueue("root.users.primarygrouponly") + .withQueue("root.admins.primarygrouponly") + .withManagedParentQueue("root.managedParent") + .build(); + + when(queueManager.getQueue(isNull())).thenReturn(null); + when(queueManager.isAmbiguous("primarygrouponly")).thenReturn(true); rule.setQueueManager(queueManager); ApplicationSubmissionContext asc = Records.newRecord( ApplicationSubmissionContext.class); @@ -134,7 +250,8 @@ public void testSecondaryGroupMapping() throws YarnException { .source("%user") .queue("%secondary_group").build()) .inputUser("a") - .expectedQueue("root.asubgroup2") + .expectedQueue("asubgroup2") + .expectedParentQueue("root") .build()); // PrimaryGroupMapping.class returns only primary group, no secondary groups @@ -175,35 +292,37 @@ public void testNullGroupMapping() throws YarnException { @Test public void testMapping() throws YarnException { + // if a mapping rule defines no parent, we cannot expect auto-creation, + // so we must provide already existing queues verifyQueueMapping( QueueMappingTestDataBuilder.create() .queueMapping(QueueMappingBuilder.create() .type(MappingType.USER) .source("a") - .queue("q1") + .queue("a") .build()) .inputUser("a") - .expectedQueue("q1") + .expectedQueue("a") .build()); verifyQueueMapping( QueueMappingTestDataBuilder.create() .queueMapping(QueueMappingBuilder.create() .type(MappingType.GROUP) .source("agroup") - .queue("q1") + .queue("a") .build()) .inputUser("a") - .expectedQueue("q1") + .expectedQueue("a") .build()); verifyQueueMapping( QueueMappingTestDataBuilder.create() .queueMapping(QueueMappingBuilder.create() .type(MappingType.USER) .source("%user") - .queue("q2") + .queue("b") .build()) .inputUser("a") - .expectedQueue("q2") + .expectedQueue("b") .build()); verifyQueueMapping( QueueMappingTestDataBuilder.create() @@ -223,20 +342,190 @@ public void testMapping() throws YarnException { .queue("%primary_group") .build()) .inputUser("a") - .expectedQueue("root.agroup") + .expectedQueue("agroup") + .expectedParentQueue("root") .build());
verifyQueueMapping( QueueMappingTestDataBuilder.create() - .queueMapping(QueueMappingBuilder.create() - .type(MappingType.USER) - .source("%user") - .queue("%user") - .parentQueue("%primary_group") - .build()) - .inputUser("a") - .expectedQueue("a") - .expectedParentQueue("root.agroup") - .build()); + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("a") + .expectedParentQueue("root.agroup") + .build()); + } + + @Test + public void testUserMappingToPrimaryGroupInvalidNestedPlaceholder() + throws YarnException { + // u:%user:%primary_group.%random, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%random") + .parentQueue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("default") + .build()); + } + + @Test + public void testUserMappingToSecondaryGroupInvalidNestedPlaceholder() + throws YarnException { + // u:%user:%secondary_group.%random, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%random") + .parentQueue("%secondary_group") + .build()) + .inputUser("a") + .expectedQueue("default") + .build()); + } + + @Test + public void testUserMappingDiffersFromSubmitterQueueDoesNotExist() + throws YarnException { + // u:a:%random, submitter: xyz, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("a") + .queue("%random") + .build()) + .inputUser("xyz") + .expectedQueue("default") + .build()); + } + + @Test + public void testSpecificUserMappingToPrimaryGroup() throws YarnException { + // u:a:%primary_group + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("a") + .queue("%primary_group") + .build()) + .inputUser("a") + .expectedQueue("agroup") + .build()); + } + + @Test + public void testSpecificUserMappingToSecondaryGroup() + throws YarnException { + // u:a:%secondary_group + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("a") + .queue("%secondary_group") + .build()) + .inputUser("a") + .expectedQueue("asubgroup2") + .build()); + } + + @Test + public void testSpecificUserMappingWithNoSecondaryGroup() + throws YarnException { + // u:nosecondarygroupuser:%secondary_group, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("nosecondarygroupuser") + .queue("%secondary_group") + .build()) + .inputUser("nosecondarygroupuser") + .expectedQueue("default") + .build()); + } + + @Test + public void testGenericUserMappingWithNoSecondaryGroup() + throws YarnException { + // u:%user:%user, no matching queue + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%secondary_group") + .build()) + .inputUser("nosecondarygroupuser") + .expectedQueue("default") + .build()); + } + + @Test(expected = YarnException.class) + public void 
testUserMappingToNestedUserPrimaryGroupWithAmbiguousQueues() + throws YarnException { + // u:%user:%user, submitter nosecondarygroupuser, queue is ambiguous + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%primary_group") + .build()) + .inputUser("nosecondarygroupuser") + .build()); + } + + @Test(expected = YarnException.class) + public void testResolvedQueueIsNotManaged() + throws YarnException { + // u:%user:%primary_group.%user, "admins" group will be "root", + // resulting parent queue will be "root" which is not managed + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%user") + .parentQueue("%primary_group") + .build()) + .inputUser("admins") + .build()); + } + + @Test(expected = YarnException.class) + public void testUserMappingToPrimaryGroupWithAmbiguousQueues() + throws YarnException { + // u:%user:%primary_group, submitter nosecondarygroupuser, + // queue is ambiguous + verifyQueueMapping( + QueueMappingTestDataBuilder.create() + .queueMapping(QueueMappingBuilder.create() + .type(MappingType.USER) + .source("%user") + .queue("%primary_group") + .build()) + .inputUser("nosecondarygroupuser") + .expectedQueue("default") + .build()); + } + + @Test + public void testUserMappingToQueueNamedAsUsernameWithSecondaryGroupAsParentQueue() + throws YarnException { verifyQueueMapping( QueueMappingTestDataBuilder.create() .queueMapping(QueueMappingBuilder.create() @@ -254,10 +543,10 @@ public void testMapping() throws YarnException { .queueMapping(QueueMappingBuilder.create() .type(MappingType.GROUP) .source("asubgroup1") - .queue("q1") + .queue("a") .build()) .inputUser("a") - .expectedQueue("q1") + .expectedQueue("a") .build()); // "agroup" queue exists @@ -267,10 +556,11 @@ public void testMapping() throws YarnException { .type(MappingType.USER) .source("%user") .queue("%primary_group") - .parentQueue("bsubgroup2") + .parentQueue("root") .build()) .inputUser("a") - .expectedQueue("root.agroup") + .expectedQueue("agroup") + .expectedParentQueue("root") .build()); // "abcgroup" queue doesn't exist, %primary_group queue, not managed parent @@ -296,7 +586,8 @@ public void testMapping() throws YarnException { .parentQueue("managedParent") .build()) .inputUser("abc") - .expectedQueue("root.abcgroup") + .expectedQueue("abcgroup") + .expectedParentQueue("root.managedParent") .build()); // "abcgroup" queue doesn't exist, %secondary_group queue @@ -319,10 +610,11 @@ public void testMapping() throws YarnException { .type(MappingType.USER) .source("%user") .queue("%secondary_group") - .parentQueue("bsubgroup2") + .parentQueue("root") .build()) .inputUser("a") - .expectedQueue("root.asubgroup2") + .expectedQueue("asubgroup2") + .expectedParentQueue("root") .build()); // specify overwritten, and see if user specified a queue, and it will be @@ -332,11 +624,11 @@ public void testMapping() throws YarnException { .queueMapping(QueueMappingBuilder.create() .type(MappingType.USER) .source("user") - .queue("q1") + .queue("a") .build()) .inputUser("user") - .inputQueue("q2") - .expectedQueue("q1") + .inputQueue("b") + .expectedQueue("a") .overwrite(true) .build()); @@ -346,11 +638,11 @@ public void testMapping() throws YarnException { .queueMapping(QueueMappingBuilder.create() .type(MappingType.USER) .source("user") - .queue("q1") + .queue("a") .build()) 
.inputUser("user") - .inputQueue("q2") - .expectedQueue("q2") + .inputQueue("b") + .expectedQueue("b") .build()); // if overwritten not specified, it should be which user specified @@ -363,8 +655,8 @@ public void testMapping() throws YarnException { .parentQueue("usergroup") .build()) .inputUser("user") - .inputQueue("default") - .expectedQueue("user") + .inputQueue("a") + .expectedQueue("a") .build()); // if overwritten not specified, it should be which user specified @@ -373,12 +665,12 @@ public void testMapping() throws YarnException { .queueMapping(QueueMappingBuilder.create() .type(MappingType.GROUP) .source("usergroup") - .queue("%user") - .parentQueue("usergroup") + .queue("b") + .parentQueue("root.bsubgroup2") .build()) .inputUser("user") - .inputQueue("agroup") - .expectedQueue("user") + .inputQueue("a") + .expectedQueue("b") .overwrite(true) .build()); @@ -390,7 +682,7 @@ public void testMapping() throws YarnException { .type(MappingType.GROUP) .source("agroup") .queue("%user") - .parentQueue("parent1") + .parentQueue("root.agroup") .build()) .inputUser("a") .expectedQueue("a") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java index 7a4ead48fc81b..e93599dd47ec4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java @@ -25,14 +25,17 @@ import java.io.File; import java.io.IOException; +import java.util.function.Consumer; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.resourcemanager.DBManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; +import org.iq80.leveldb.Options; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -131,19 +134,23 @@ public void testProxyCA() throws Exception { } @Test(timeout = 60000) - public void testCompactionCycle() throws Exception { + public void testCompactionCycle() { final DB mockdb = mock(DB.class); conf.setLong(YarnConfiguration.RM_LEVELDB_COMPACTION_INTERVAL_SECS, 1); - stateStore = new LeveldbRMStateStore() { + stateStore = new LeveldbRMStateStore(); + DBManager dbManager = new DBManager() { @Override - protected DB openDatabase() throws Exception { + public DB initDatabase(File configurationFile, Options options, + Consumer initMethod) { return mockdb; } }; + dbManager.setDb(mockdb); + stateStore.setDbManager(dbManager); stateStore.init(conf); stateStore.start(); verify(mockdb, timeout(10000)).compactRange( - (byte[]) isNull(), (byte[]) isNull()); + isNull(), isNull()); } @Test @@ -181,12 +188,12 @@ public boolean isFinalStateValid() throws Exception { } @Override - public void writeVersion(Version version) throws Exception { - 
stateStore.dbStoreVersion(version); + public void writeVersion(Version version) { + stateStore.storeVersion(version); } @Override - public Version getCurrentVersion() throws Exception { + public Version getCurrentVersion() { return stateStore.getCurrentVersion(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 767c4273c380f..7ffaba5eb61a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -22,6 +22,9 @@ import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.retry.RetryNTimes; import org.apache.curator.test.TestingServer; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.metrics2.impl.MetricsRecords; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -71,10 +74,10 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.base.Joiner; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; @@ -1567,4 +1570,40 @@ public void testAppSubmissionContextIsPrunedInFinalApplicationState() Collections.emptyMap(), ctx.getApplicationSchedulingPropertiesMap()); store.close(); } + + @Test + public void testMetricsInited() throws Exception { + TestZKRMStateStoreTester zkTester = new TestZKRMStateStoreTester(); + Configuration conf = createConfForDelegationTokenNodeSplit(1); + MetricsCollectorImpl collector = new MetricsCollectorImpl(); + ZKRMStateStoreOpDurations opDurations = + ((ZKRMStateStore)zkTester.getRMStateStore(conf)).opDurations; + + long anyDuration = 10; + opDurations.addLoadStateCallDuration(anyDuration); + opDurations.addStoreApplicationStateCallDuration(anyDuration); + opDurations.addUpdateApplicationStateCallDuration(anyDuration); + opDurations.addRemoveApplicationStateCallDuration(anyDuration); + + Thread.sleep(110); + + opDurations.getMetrics(collector, true); + assertEquals("Incorrect number of perf metrics", 1, + collector.getRecords().size()); + MetricsRecord record = collector.getRecords().get(0); + MetricsRecords.assertTag(record, + ZKRMStateStoreOpDurations.RECORD_INFO.name(), + "ZKRMStateStoreOpDurations"); + + double expectAvgTime = anyDuration; + MetricsRecords.assertMetric(record, + "LoadStateCallAvgTime", expectAvgTime); + MetricsRecords.assertMetric(record, + "StoreApplicationStateCallAvgTime", expectAvgTime); + MetricsRecords.assertMetric(record, + "UpdateApplicationStateCallAvgTime", expectAvgTime); + 
MetricsRecords.assertMetric(record, + "RemoveApplicationStateCallAvgTime", expectAvgTime); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java index 893d1ca627a79..3cb428c5c5960 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java @@ -18,15 +18,18 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; -import com.google.common.base.Optional; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashSet; import java.util.Map; +import java.util.Optional; import java.util.Set; import javax.crypto.SecretKey; - import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; @@ -34,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -41,16 +45,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.junit.Before; import org.junit.After; +import org.junit.Before; import org.junit.Test; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestZKRMStateStorePerf extends RMStateStoreTestBase implements Tool { @@ -98,12 +97,12 @@ public void tearDown() throws Exception { } private void initStore(String hostPort) { - Optional optHostPort = Optional.fromNullable(hostPort); + Optional optHostPort = Optional.ofNullable(hostPort); RMContext rmContext = mock(RMContext.class); conf = new YarnConfiguration(); conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort - .or((curatorTestingServer == null) ? "" : curatorTestingServer + .orElse((curatorTestingServer == null) ? 
"" : curatorTestingServer .getConnectString())); conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java index ff5738c03aab6..389dd62e4afbd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationSystem.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSMaxRunningAppsEnforcer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; @@ -175,6 +176,9 @@ private CapacityScheduler initializeCapacityScheduler() { CapacityScheduler cs = Mockito.spy(new CapacityScheduler()); cs.setConf(conf); + CSMaxRunningAppsEnforcer enforcer = + Mockito.mock(CSMaxRunningAppsEnforcer.class); + cs.setMaxRunningAppsEnforcer(enforcer); mockRMContext = ReservationSystemTestUtil.createRMContext(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index f69faf4ea557e..017a1e021d7cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; + +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.junit.Assert; import org.slf4j.Logger; @@ -135,12 +138,15 @@ public void testNMExpiry() throws Exception { String hostname3 = "localhost3"; Resource capability = BuilderUtils.newResource(1024, 1); + NodeStatus mockNodeStatus = createMockNodeStatus(); + RegisterNodeManagerRequest request1 = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1 = NodeId.newInstance(hostname1, 0); request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); + request1.setNodeStatus(mockNodeStatus); 
resourceTrackerService.registerNodeManager(request1); RegisterNodeManagerRequest request2 = recordFactory @@ -149,6 +155,7 @@ public void testNMExpiry() throws Exception { request2.setNodeId(nodeId2); request2.setHttpPort(0); request2.setResource(capability); + request2.setNodeStatus(mockNodeStatus); resourceTrackerService.registerNodeManager(request2); int waitCount = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index 3c4e6b424de0a..817fb9dfc3398 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -36,6 +38,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.api.records.NodeAction; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; @@ -178,9 +181,13 @@ public void testCompareRMNodeAfterReconnect() throws Exception { RegisterNodeManagerRequest request1 = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1 = NodeId.newInstance(hostname1, 0); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); + request1.setNodeStatus(mockNodeStatus); resourceTrackerService.registerNodeManager(request1); Assert.assertNotNull(context.getRMNodes().get(nodeId1)); // verify Scheduler and RMContext use same RMNode reference. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java index 32ece346c216d..ad29d274a4f5b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java @@ -70,8 +70,6 @@ public class MockRMApp implements RMApp { int maxAppAttempts = 1; List amReqs; private Set applicationTags = null; - private boolean logAggregationEnabled; - private boolean logAggregationFinished; public MockRMApp(int newid, long time, RMAppState newState) { finish = time; @@ -238,24 +236,6 @@ public int getMaxAppAttempts() { return maxAppAttempts; } - @Override - public boolean isLogAggregationEnabled() { - return logAggregationEnabled; - } - - @Override - public boolean isLogAggregationFinished() { - return logAggregationFinished; - } - - public void setLogAggregationEnabled(boolean enabled) { - this.logAggregationEnabled = enabled; - } - - public void setLogAggregationFinished(boolean finished) { - this.logAggregationFinished = finished; - } - public void setNumMaxRetries(int maxAppAttempts) { this.maxAppAttempts = maxAppAttempts; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 8cff8fcfa8c0d..92011808b7ec8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index e2f80cafd4068..4e5ff3f7687f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -643,6 +643,8 @@ private 
Container allocateApplicationAttempt() { RMContainer rmContainer = mock(RMContainerImpl.class); when(scheduler.getRMContainer(container.getId())). thenReturn(rmContainer); + when(container.getNodeId()).thenReturn( + BuilderUtils.newNodeId("localhost", 0)); applicationAttempt.handle( new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(), @@ -1530,6 +1532,119 @@ public void testFinishedContainer() { .handle(Mockito.any(RMNodeEvent.class)); } + /** + * Check a completed container that is not yet pulled by AM heartbeat, + * is ACKed to NM for cleanup when the AM container exits. + */ + @Test + public void testFinishedContainerNotBeingPulledByAMHeartbeat() { + Container amContainer = allocateApplicationAttempt(); + launchApplicationAttempt(amContainer); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); + + application.handle(new RMAppRunningOnNodeEvent(application + .getApplicationId(), amContainer.getNodeId())); + + // Complete a non-AM container + ContainerId containerId1 = BuilderUtils.newContainerId(applicationAttempt + .getAppAttemptId(), 2); + Container container1 = mock(Container.class); + ContainerStatus containerStatus1 = mock(ContainerStatus.class); + when(container1.getId()).thenReturn( + containerId1); + when(containerStatus1.getContainerId()).thenReturn(containerId1); + when(container1.getNodeId()).thenReturn(NodeId.newInstance("host", 1234)); + applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent( + applicationAttempt.getAppAttemptId(), containerStatus1, + container1.getNodeId())); + + // Verify justFinishedContainers + ArgumentCaptor captor = + ArgumentCaptor.forClass(RMNodeFinishedContainersPulledByAMEvent.class); + Assert.assertEquals(1, applicationAttempt.getJustFinishedContainers() + .size()); + Assert.assertEquals(container1.getId(), applicationAttempt + .getJustFinishedContainers().get(0).getContainerId()); + Assert.assertTrue( + getFinishedContainersSentToAM(applicationAttempt).isEmpty()); + + // finish AM container to emulate AM exit event + containerStatus1 = mock(ContainerStatus.class); + ContainerId amContainerId = amContainer.getId(); + when(containerStatus1.getContainerId()).thenReturn(amContainerId); + applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent( + applicationAttempt.getAppAttemptId(), containerStatus1, + amContainer.getNodeId())); + + Mockito.verify(rmnodeEventHandler, times(2)).handle(captor.capture()); + List containerPulledEvents = + captor.getAllValues(); + // Verify AM container is acked to NM via the RMNodeEvent immediately + Assert.assertEquals(amContainer.getId(), + containerPulledEvents.get(0).getContainers().get(0)); + // Verify the non-AM container is acked to NM via the RMNodeEvent + Assert.assertEquals(container1.getId(), + containerPulledEvents.get(1).getContainers().get(0)); + Assert.assertTrue("No container shall be added to justFinishedContainers" + + " as soon as AM container exits", + applicationAttempt.getJustFinishedContainers().isEmpty()); + Assert.assertTrue( + getFinishedContainersSentToAM(applicationAttempt).isEmpty()); + } + + /** + * Check a completed container is ACKed to NM for cleanup after the AM + * container has exited. 
+ */ + @Test + public void testFinishedContainerAfterAMExit() { + Container amContainer = allocateApplicationAttempt(); + launchApplicationAttempt(amContainer); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); + + // finish AM container to emulate AM exit event + ContainerStatus containerStatus1 = mock(ContainerStatus.class); + ContainerId amContainerId = amContainer.getId(); + when(containerStatus1.getContainerId()).thenReturn(amContainerId); + application.handle(new RMAppRunningOnNodeEvent(application + .getApplicationId(), + amContainer.getNodeId())); + applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent( + applicationAttempt.getAppAttemptId(), containerStatus1, + amContainer.getNodeId())); + + // Verify AM container is acked to NM via the RMNodeEvent immediately + ArgumentCaptor captor = + ArgumentCaptor.forClass(RMNodeFinishedContainersPulledByAMEvent.class); + Mockito.verify(rmnodeEventHandler).handle(captor.capture()); + Assert.assertEquals(amContainer.getId(), + captor.getValue().getContainers().get(0)); + + // Complete a non-AM container + ContainerId containerId1 = BuilderUtils.newContainerId(applicationAttempt + .getAppAttemptId(), 2); + Container container1 = mock(Container.class); + containerStatus1 = mock(ContainerStatus.class); + when(container1.getId()).thenReturn(containerId1); + when(containerStatus1.getContainerId()).thenReturn(containerId1); + when(container1.getNodeId()).thenReturn(NodeId.newInstance("host", 1234)); + applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent( + applicationAttempt.getAppAttemptId(), containerStatus1, + container1.getNodeId())); + + // Verify container is acked to NM via the RMNodeEvent immediately + captor = ArgumentCaptor.forClass( + RMNodeFinishedContainersPulledByAMEvent.class); + Mockito.verify(rmnodeEventHandler, times(2)).handle(captor.capture()); + Assert.assertEquals(container1.getId(), + captor.getAllValues().get(1).getContainers().get(0)); + Assert.assertTrue("No container shall be added to justFinishedContainers" + + " after AM container exited", + applicationAttempt.getJustFinishedContainers().isEmpty()); + Assert.assertTrue( + getFinishedContainersSentToAM(applicationAttempt).isEmpty()); + } + private static List getFinishedContainersSentToAM( RMAppAttempt applicationAttempt) { List containers = new ArrayList(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java index 9b1512d1e4d00..72f420eea8296 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java @@ -33,7 +33,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import 
org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java index 8967234e63dfe..b9506be4b3e18 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppMetricsChecker.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceMetricsChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceMetricsChecker.java index b49b125a9746d..ba37b8ef91f3d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceMetricsChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceMetricsChecker.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index e67deb5245fbb..b1a69f019bfa7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -32,8 +33,8 @@ import java.util.Set; import java.util.stream.Collectors; -import com.google.common.collect.Lists; -import 
com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; @@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; @@ -1051,9 +1053,12 @@ public void testNodemanagerReconnect() throws Exception { RegisterNodeManagerRequest request1 = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1 = NodeId.newInstance(hostname1, 0); + NodeStatus mockNodeStatus = createMockNodeStatus(); + request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); + request1.setNodeStatus(mockNodeStatus); privateResourceTrackerService.registerNodeManager(request1); privateDispatcher.await(); Resource clusterResource = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java index c1703bc52e3e2..14eca5ae5e3b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java @@ -24,11 +24,13 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode; import org.apache.hadoop.yarn.util.resource.ResourceUtils; +import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -40,12 +42,19 @@ */ public class TestClusterNodeTracker { private ClusterNodeTracker nodeTracker; + private ClusterMetrics metrics; @Before public void setup() { + metrics = ClusterMetrics.getMetrics(); nodeTracker = new ClusterNodeTracker<>(); } + @After + public void teardown() { + ClusterMetrics.destroy(); + } + private void addEight4x4Nodes() { MockNodes.resetHostIds(); List rmNodes = @@ -65,6 +74,15 @@ public void testGetNodeCount() { 4, nodeTracker.nodeCount("rack0")); } + @Test + public void testIncrCapability() { + addEight4x4Nodes(); + assertEquals("Cluster Capability Memory incorrect", + metrics.getCapabilityMB(), (4096 * 8)); + assertEquals("Cluster Capability Vcores incorrect", + metrics.getCapabilityVirtualCores(), 4 * 8); + } + @Test public void testGetNodesForResourceName() 
throws Exception { addEight4x4Nodes(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java new file mode 100644 index 0000000000000..eb240d1b6d338 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestPartitionQueueMetrics.java @@ -0,0 +1,752 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler; + +import static org.apache.hadoop.test.MetricsAsserts.assertCounter; +import static org.apache.hadoop.test.MetricsAsserts.assertGauge; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueueMetrics; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestPartitionQueueMetrics { + + static final int GB = 1024; // MB + private static final Configuration CONF = new Configuration(); + + private MetricsSystem ms; + + @Before + public void setUp() { + ms = new MetricsSystemImpl(); + QueueMetrics.clearQueueMetrics(); + PartitionQueueMetrics.clearQueueMetrics(); + } + + @After + public void tearDown() { + ms.shutdown(); + } + + /** + * Structure: + * Both queues, q1 & q2 has been configured to run in only 1 partition, x. 
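// Editor's sketch (not part of the patch): the wiring the partition-metrics tests below repeat,
// condensed for reference. It only uses calls that already appear in this file (MetricsSystemImpl,
// QueueMetrics.forQueue, incrPendingResources, Mockito's mock/when); the queue and user names are
// illustrative, and the snippet assumes it sits in the same package as the test class so that
// Queue, QueueMetrics and the CONF constant resolve without further imports.
MetricsSystem ms = new MetricsSystemImpl();
Queue parent = mock(Queue.class);                      // the parent is a mock, not a real scheduler queue
QueueMetrics root = QueueMetrics.forQueue(ms, "root", null, true, CONF);
when(parent.getMetrics()).thenReturn(root);            // child metrics roll up through this mocked parent
when(parent.getQueueName()).thenReturn("root");
QueueMetrics q1 = QueueMetrics.forQueue(ms, "root.q1", parent, true, CONF);
q1.incrPendingResources("x", "alice", 2, Resource.newInstance(1024, 1));   // 2 GB / 2 vcores pending on partition "x"
// The increment is then visible at three registered sources: the partition source for "x",
// the queue source for "root" and the queue source for "root.q1".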
+ * + * root + * / \ + * q1 q2 + * + * @throws Exception + */ + + @Test + public void testSinglePartitionWithSingleLevelQueueMetrics() + throws Exception { + + String parentQueueName = "root"; + Queue parentQueue = mock(Queue.class); + String user = "alice"; + + QueueMetrics root = QueueMetrics.forQueue(ms, "root", null, true, CONF); + when(parentQueue.getMetrics()).thenReturn(root); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + QueueMetrics q1 = + QueueMetrics.forQueue(ms, "root.q1", parentQueue, true, CONF); + QueueMetrics q2 = + QueueMetrics.forQueue(ms, "root.q2", parentQueue, true, CONF); + + q1.submitApp(user); + q1.submitAppAttempt(user); + + root.setAvailableResourcesToQueue("x", + Resources.createResource(200 * GB, 200)); + q1.setAvailableResourcesToQueue("x", + Resources.createResource(100 * GB, 100)); + + q1.incrPendingResources("x", user, 2, Resource.newInstance(1024, 1)); + + MetricsSource partitionSource = partitionSource(ms, "x"); + MetricsSource rootQueueSource = queueSource(ms, "x", parentQueueName); + MetricsSource q1Source = queueSource(ms, "x", "root.q1"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2); + + q2.incrPendingResources("x", user, 3, Resource.newInstance(1024, 1)); + MetricsSource q2Source = queueSource(ms, "x", "root.q2"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5); + checkResources(q2Source, 0, 0, 0, 0, 0, 3 * GB, 3, 3); + } + + /** + * Structure: + * Both queues, q1 & q2 has been configured to run in both partitions, x & y. + * + * root + * / \ + * q1 q2 + * + * @throws Exception + */ + @Test + public void testTwoPartitionWithSingleLevelQueueMetrics() throws Exception { + + String parentQueueName = "root"; + String user = "alice"; + + QueueMetrics root = + QueueMetrics.forQueue(ms, parentQueueName, null, false, CONF); + Queue parentQueue = mock(Queue.class); + when(parentQueue.getMetrics()).thenReturn(root); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + + QueueMetrics q1 = + QueueMetrics.forQueue(ms, "root.q1", parentQueue, false, CONF); + QueueMetrics q2 = + QueueMetrics.forQueue(ms, "root.q2", parentQueue, false, CONF); + + AppSchedulingInfo app = mockApp(user); + q1.submitApp(user); + q1.submitAppAttempt(user); + + root.setAvailableResourcesToQueue("x", + Resources.createResource(200 * GB, 200)); + q1.setAvailableResourcesToQueue("x", + Resources.createResource(100 * GB, 100)); + + q1.incrPendingResources("x", user, 2, Resource.newInstance(1024, 1)); + + MetricsSource xPartitionSource = partitionSource(ms, "x"); + MetricsSource xRootQueueSource = queueSource(ms, "x", parentQueueName); + MetricsSource q1Source = queueSource(ms, "x", "root.q1"); + + checkResources(xPartitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(xRootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2); + + root.setAvailableResourcesToQueue("y", + Resources.createResource(400 * GB, 400)); + q2.setAvailableResourcesToQueue("y", + Resources.createResource(200 * GB, 200)); + + q2.incrPendingResources("y", user, 3, Resource.newInstance(1024, 1)); + + MetricsSource yPartitionSource = partitionSource(ms, "y"); + MetricsSource yRootQueueSource = queueSource(ms, "y", parentQueueName); + MetricsSource 
q2Source = queueSource(ms, "y", "root.q2"); + + checkResources(yPartitionSource, 0, 0, 0, 400 * GB, 400, 3 * GB, 3, 3); + checkResources(yRootQueueSource, 0, 0, 0, 400 * GB, 400, 3 * GB, 3, 3); + checkResources(q2Source, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3); + } + + /** + * Structure: + * Both queues, q1 has been configured to run in multiple partitions, x & y. + * + * root + * / + * q1 + * + * @throws Exception + */ + @Test + public void testMultiplePartitionWithSingleQueueMetrics() throws Exception { + + String parentQueueName = "root"; + Queue parentQueue = mock(Queue.class); + + QueueMetrics root = + QueueMetrics.forQueue(ms, parentQueueName, null, true, CONF); + when(parentQueue.getMetrics()).thenReturn(root); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + + QueueMetrics q1 = + QueueMetrics.forQueue(ms, "root.q1", parentQueue, true, CONF); + + root.setAvailableResourcesToQueue("x", + Resources.createResource(200 * GB, 200)); + root.setAvailableResourcesToQueue("y", + Resources.createResource(300 * GB, 300)); + + q1.incrPendingResources("x", "test_user", 2, Resource.newInstance(1024, 1)); + + MetricsSource partitionSource = partitionSource(ms, "x"); + MetricsSource rootQueueSource = queueSource(ms, "x", parentQueueName); + MetricsSource q1Source = queueSource(ms, "x", "root.q1"); + MetricsSource userSource = userSource(ms, "x", "test_user", "root.q1"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(q1Source, 0, 0, 0, 0, 0, 2 * GB, 2, 2); + checkResources(userSource, 0, 0, 0, 0, 0, 2 * GB, 2, 2); + + q1.incrPendingResources("x", "test_user", 3, Resource.newInstance(1024, 1)); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 5 * GB, 5, 5); + checkResources(q1Source, 0, 0, 0, 0, 0, 5 * GB, 5, 5); + checkResources(userSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5); + + q1.incrPendingResources("x", "test_user1", 4, + Resource.newInstance(1024, 1)); + MetricsSource userSource1 = userSource(ms, "x", "test_user1", "root.q1"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 9 * GB, 9, 9); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 9 * GB, 9, 9); + checkResources(q1Source, 0, 0, 0, 0, 0, 9 * GB, 9, 9); + checkResources(userSource1, 0, 0, 0, 0, 0, 4 * GB, 4, 4); + + q1.incrPendingResources("y", "test_user1", 6, + Resource.newInstance(1024, 1)); + MetricsSource partitionSourceY = partitionSource(ms, "y"); + MetricsSource rootQueueSourceY = queueSource(ms, "y", parentQueueName); + MetricsSource q1SourceY = queueSource(ms, "y", "root.q1"); + MetricsSource userSourceY = userSource(ms, "y", "test_user1", "root.q1"); + + checkResources(partitionSourceY, 0, 0, 0, 300 * GB, 300, 6 * GB, 6, 6); + checkResources(rootQueueSourceY, 0, 0, 0, 300 * GB, 300, 6 * GB, 6, 6); + checkResources(q1SourceY, 0, 0, 0, 0, 0, 6 * GB, 6, 6); + checkResources(userSourceY, 0, 0, 0, 0, 0, 6 * GB, 6, 6); + } + + /** + * Structure: + * Both queues, q1 & q2 has been configured to run in both partitions, x & y. 
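// Editor's note (not part of the patch): the pending-resource figures asserted in the preceding
// test follow from simple accumulation per partition, spelled out:
//   partition "x": 2 GB + 3 GB from test_user and 4 GB from test_user1 -> 9 GB / 9 vcores / 9 containers
//                  at the partition source, at "root" and at "root.q1", while the per-user sources
//                  stay at 5 GB (test_user) and 4 GB (test_user1);
//   partition "y": only the 6 GB request from test_user1 shows up there, i.e. partitions are
//                  aggregated independently of one another.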
+ * + * root + * / \ + * q1 q2 + * q1 + * / \ + * q11 q12 + * q2 + * / \ + * q21 q22 + * + * @throws Exception + */ + + @Test + public void testMultiplePartitionsWithMultiLevelQueuesMetrics() + throws Exception { + + String parentQueueName = "root"; + Queue parentQueue = mock(Queue.class); + + QueueMetrics root = + QueueMetrics.forQueue(ms, parentQueueName, null, true, CONF); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + when(parentQueue.getMetrics()).thenReturn(root); + + QueueMetrics q1 = + QueueMetrics.forQueue(ms, "root.q1", parentQueue, true, CONF); + Queue childQueue1 = mock(Queue.class); + when(childQueue1.getQueueName()).thenReturn("root.q1"); + when(childQueue1.getMetrics()).thenReturn(q1); + + QueueMetrics q11 = + QueueMetrics.forQueue(ms, "root.q1.q11", childQueue1, true, CONF); + QueueMetrics q12 = + QueueMetrics.forQueue(ms, "root.q1.q12", childQueue1, true, CONF); + + QueueMetrics q2 = + QueueMetrics.forQueue(ms, "root.q2", parentQueue, true, CONF); + Queue childQueue2 = mock(Queue.class); + when(childQueue2.getQueueName()).thenReturn("root.q2"); + when(childQueue2.getMetrics()).thenReturn(q2); + + QueueMetrics q21 = + QueueMetrics.forQueue(ms, "root.q2.q21", childQueue2, true, CONF); + QueueMetrics q22 = + QueueMetrics.forQueue(ms, "root.q2.q22", childQueue2, true, CONF); + + root.setAvailableResourcesToQueue("x", + Resources.createResource(200 * GB, 200)); + + q1.setAvailableResourcesToQueue("x", + Resources.createResource(100 * GB, 100)); + q11.setAvailableResourcesToQueue("x", + Resources.createResource(50 * GB, 50)); + + q11.incrPendingResources("x", "test_user", 2, + Resource.newInstance(1024, 1)); + + MetricsSource partitionSource = partitionSource(ms, "x"); + MetricsSource rootQueueSource = queueSource(ms, "x", parentQueueName); + MetricsSource q1Source = queueSource(ms, "x", "root.q1"); + MetricsSource userSource = userSource(ms, "x", "test_user", "root.q1"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2); + checkResources(userSource, 0, 0, 0, 0 * GB, 0, 2 * GB, 2, 2); + + q11.incrPendingResources("x", "test_user", 4, + Resource.newInstance(1024, 1)); + + MetricsSource q11Source = queueSource(ms, "x", "root.q1.q11"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 6 * GB, 6, 6); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 6 * GB, 6, 6); + checkResources(q11Source, 0, 0, 0, 50 * GB, 50, 6 * GB, 6, 6); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 6 * GB, 6, 6); + checkResources(userSource, 0, 0, 0, 0 * GB, 0, 6 * GB, 6, 6); + + q11.incrPendingResources("x", "test_user1", 5, + Resource.newInstance(1024, 1)); + + MetricsSource q1UserSource1 = userSource(ms, "x", "test_user1", "root.q1"); + MetricsSource userSource1 = + userSource(ms, "x", "test_user1", "root.q1.q11"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 11 * GB, 11, 11); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 11 * GB, 11, 11); + checkResources(q11Source, 0, 0, 0, 50 * GB, 50, 11 * GB, 11, 11); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 11 * GB, 11, 11); + checkResources(userSource, 0, 0, 0, 0 * GB, 0, 6 * GB, 6, 6); + checkResources(q1UserSource1, 0, 0, 0, 0 * GB, 0, 5 * GB, 5, 5); + checkResources(userSource1, 0, 0, 0, 0 * GB, 0, 5 * GB, 5, 5); + + q12.incrPendingResources("x", "test_user", 
5, + Resource.newInstance(1024, 1)); + MetricsSource q12Source = queueSource(ms, "x", "root.q1.q12"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 16 * GB, 16, 16); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 16 * GB, 16, 16); + checkResources(q1Source, 0, 0, 0, 100 * GB, 100, 16 * GB, 16, 16); + checkResources(q12Source, 0, 0, 0, 0, 0, 5 * GB, 5, 5); + + root.setAvailableResourcesToQueue("y", + Resources.createResource(200 * GB, 200)); + q1.setAvailableResourcesToQueue("y", + Resources.createResource(100 * GB, 100)); + q12.setAvailableResourcesToQueue("y", + Resources.createResource(50 * GB, 50)); + + q12.incrPendingResources("y", "test_user", 3, + Resource.newInstance(1024, 1)); + + MetricsSource yPartitionSource = partitionSource(ms, "y"); + MetricsSource yRootQueueSource = queueSource(ms, "y", parentQueueName); + MetricsSource q1YSource = queueSource(ms, "y", "root.q1"); + MetricsSource q12YSource = queueSource(ms, "y", "root.q1.q12"); + + checkResources(yPartitionSource, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3); + checkResources(yRootQueueSource, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3); + checkResources(q1YSource, 0, 0, 0, 100 * GB, 100, 3 * GB, 3, 3); + checkResources(q12YSource, 0, 0, 0, 50 * GB, 50, 3 * GB, 3, 3); + + root.setAvailableResourcesToQueue("y", + Resources.createResource(200 * GB, 200)); + q2.setAvailableResourcesToQueue("y", + Resources.createResource(100 * GB, 100)); + q21.setAvailableResourcesToQueue("y", + Resources.createResource(50 * GB, 50)); + + q21.incrPendingResources("y", "test_user", 5, + Resource.newInstance(1024, 1)); + MetricsSource q21Source = queueSource(ms, "y", "root.q2.q21"); + MetricsSource q2YSource = queueSource(ms, "y", "root.q2"); + + checkResources(yPartitionSource, 0, 0, 0, 200 * GB, 200, 8 * GB, 8, 8); + checkResources(yRootQueueSource, 0, 0, 0, 200 * GB, 200, 8 * GB, 8, 8); + checkResources(q2YSource, 0, 0, 0, 100 * GB, 100, 5 * GB, 5, 5); + checkResources(q21Source, 0, 0, 0, 50 * GB, 50, 5 * GB, 5, 5); + + q22.incrPendingResources("y", "test_user", 6, + Resource.newInstance(1024, 1)); + MetricsSource q22Source = queueSource(ms, "y", "root.q2.q22"); + + checkResources(yPartitionSource, 0, 0, 0, 200 * GB, 200, 14 * GB, 14, 14); + checkResources(yRootQueueSource, 0, 0, 0, 200 * GB, 200, 14 * GB, 14, 14); + checkResources(q22Source, 0, 0, 0, 0, 0, 6 * GB, 6, 6); + } + + @Test + public void testTwoLevelWithUserMetrics() { + String parentQueueName = "root"; + String leafQueueName = "root.leaf"; + String user = "alice"; + String partition = "x"; + + QueueMetrics parentMetrics = + QueueMetrics.forQueue(ms, parentQueueName, null, true, CONF); + Queue parentQueue = mock(Queue.class); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + when(parentQueue.getMetrics()).thenReturn(parentMetrics); + QueueMetrics metrics = + QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, CONF); + AppSchedulingInfo app = mockApp(user); + + metrics.submitApp(user); + metrics.submitAppAttempt(user); + + parentMetrics.setAvailableResourcesToQueue(partition, + Resources.createResource(100 * GB, 100)); + metrics.setAvailableResourcesToQueue(partition, + Resources.createResource(100 * GB, 100)); + parentMetrics.setAvailableResourcesToUser(partition, user, + Resources.createResource(10 * GB, 10)); + metrics.setAvailableResourcesToUser(partition, user, + Resources.createResource(10 * GB, 10)); + metrics.incrPendingResources(partition, user, 6, + Resources.createResource(3 * GB, 3)); + + MetricsSource partitionSource = 
partitionSource(ms, partition); + MetricsSource parentQueueSource = + queueSource(ms, partition, parentQueueName); + MetricsSource queueSource = queueSource(ms, partition, leafQueueName); + MetricsSource userSource = userSource(ms, partition, user, leafQueueName); + MetricsSource userSource1 = + userSource(ms, partition, user, parentQueueName); + + checkResources(queueSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, 6, 0, + 0, 0); + checkResources(parentQueueSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, + 6, 0, 0, 0); + checkResources(userSource, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0, 0, + 0); + checkResources(userSource1, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0, + 0, 0); + checkResources(partitionSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, + 6, 0, 0, 0); + + metrics.runAppAttempt(app.getApplicationId(), user); + + metrics.allocateResources(partition, user, 3, + Resources.createResource(1 * GB, 1), true); + metrics.reserveResource(partition, user, + Resources.createResource(3 * GB, 3)); + + // Available resources is set externally, as it depends on dynamic + // configurable cluster/queue resources + checkResources(queueSource, 3 * GB, 3, 3, 3, 0, 100 * GB, 100, 15 * GB, 15, + 3, 3 * GB, 3, 1); + checkResources(parentQueueSource, 3 * GB, 3, 3, 3, 0, 100 * GB, 100, + 15 * GB, 15, 3, 3 * GB, 3, 1); + checkResources(partitionSource, 3 * GB, 3, 3, 3, 0, 100 * GB, 100, 15 * GB, + 15, 3, 3 * GB, 3, 1); + checkResources(userSource, 3 * GB, 3, 3, 3, 0, 10 * GB, 10, 15 * GB, 15, 3, + 3 * GB, 3, 1); + checkResources(userSource1, 3 * GB, 3, 3, 3, 0, 10 * GB, 10, 15 * GB, 15, 3, + 3 * GB, 3, 1); + + metrics.allocateResources(partition, user, 3, + Resources.createResource(1 * GB, 1), true); + + checkResources(queueSource, 6 * GB, 6, 6, 6, 0, 100 * GB, 100, 12 * GB, 12, + 0, 3 * GB, 3, 1); + checkResources(parentQueueSource, 6 * GB, 6, 6, 6, 0, 100 * GB, 100, + 12 * GB, 12, 0, 3 * GB, 3, 1); + + metrics.releaseResources(partition, user, 1, + Resources.createResource(2 * GB, 2)); + metrics.unreserveResource(partition, user, + Resources.createResource(3 * GB, 3)); + checkResources(queueSource, 4 * GB, 4, 5, 6, 1, 100 * GB, 100, 12 * GB, 12, + 0, 0, 0, 0); + checkResources(parentQueueSource, 4 * GB, 4, 5, 6, 1, 100 * GB, 100, + 12 * GB, 12, 0, 0, 0, 0); + checkResources(partitionSource, 4 * GB, 4, 5, 6, 1, 100 * GB, 100, 12 * GB, + 12, 0, 0, 0, 0); + checkResources(userSource, 4 * GB, 4, 5, 6, 1, 10 * GB, 10, 12 * GB, 12, 0, + 0, 0, 0); + checkResources(userSource1, 4 * GB, 4, 5, 6, 1, 10 * GB, 10, 12 * GB, 12, 0, + 0, 0, 0); + + metrics.finishAppAttempt(app.getApplicationId(), app.isPending(), + app.getUser()); + + metrics.finishApp(user, RMAppState.FINISHED); + } + + @Test + public void testThreeLevelWithUserMetrics() { + String parentQueueName = "root"; + String leafQueueName = "root.leaf"; + String leafQueueName1 = "root.leaf.leaf1"; + String user = "alice"; + String partitionX = "x"; + String partitionY = "y"; + + QueueMetrics parentMetrics = + QueueMetrics.forQueue(parentQueueName, null, true, CONF); + Queue parentQueue = mock(Queue.class); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + when(parentQueue.getMetrics()).thenReturn(parentMetrics); + QueueMetrics metrics = + QueueMetrics.forQueue(leafQueueName, parentQueue, true, CONF); + Queue leafQueue = mock(Queue.class); + when(leafQueue.getQueueName()).thenReturn(leafQueueName); + when(leafQueue.getMetrics()).thenReturn(metrics); + QueueMetrics metrics1 = + QueueMetrics.forQueue(leafQueueName1, leafQueue, 
true, CONF); + AppSchedulingInfo app = mockApp(user); + + metrics1.submitApp(user); + metrics1.submitAppAttempt(user); + + parentMetrics.setAvailableResourcesToQueue(partitionX, + Resources.createResource(200 * GB, 200)); + parentMetrics.setAvailableResourcesToQueue(partitionY, + Resources.createResource(500 * GB, 500)); + metrics.setAvailableResourcesToQueue(partitionX, + Resources.createResource(100 * GB, 100)); + metrics.setAvailableResourcesToQueue(partitionY, + Resources.createResource(400 * GB, 400)); + metrics1.setAvailableResourcesToQueue(partitionX, + Resources.createResource(50 * GB, 50)); + metrics1.setAvailableResourcesToQueue(partitionY, + Resources.createResource(300 * GB, 300)); + parentMetrics.setAvailableResourcesToUser(partitionX, user, + Resources.createResource(20 * GB, 20)); + parentMetrics.setAvailableResourcesToUser(partitionY, user, + Resources.createResource(50 * GB, 50)); + metrics.setAvailableResourcesToUser(partitionX, user, + Resources.createResource(10 * GB, 10)); + metrics.setAvailableResourcesToUser(partitionY, user, + Resources.createResource(40 * GB, 40)); + metrics1.setAvailableResourcesToUser(partitionX, user, + Resources.createResource(5 * GB, 5)); + metrics1.setAvailableResourcesToUser(partitionY, user, + Resources.createResource(30 * GB, 30)); + metrics1.incrPendingResources(partitionX, user, 6, + Resources.createResource(3 * GB, 3)); + metrics1.incrPendingResources(partitionY, user, 6, + Resources.createResource(4 * GB, 4)); + + MetricsSource partitionSourceX = + partitionSource(metrics1.getMetricsSystem(), partitionX); + + MetricsSource parentQueueSourceWithPartX = + queueSource(metrics1.getMetricsSystem(), partitionX, parentQueueName); + MetricsSource queueSourceWithPartX = + queueSource(metrics1.getMetricsSystem(), partitionX, leafQueueName); + MetricsSource queueSource1WithPartX = + queueSource(metrics1.getMetricsSystem(), partitionX, leafQueueName1); + MetricsSource parentUserSourceWithPartX = userSource(metrics1.getMetricsSystem(), + partitionX, user, parentQueueName); + MetricsSource userSourceWithPartX = userSource(metrics1.getMetricsSystem(), + partitionX, user, leafQueueName); + MetricsSource userSource1WithPartX = userSource(metrics1.getMetricsSystem(), + partitionX, user, leafQueueName1); + + checkResources(partitionSourceX, 0, 0, 0, 0, 0, 200 * GB, 200, 18 * GB, 18, + 6, 0, 0, 0); + checkResources(parentQueueSourceWithPartX, 0, 0, 0, 0, 0, 200 * GB, 200, 18 * GB, + 18, 6, 0, 0, 0); + + checkResources(queueSourceWithPartX, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, 6, + 0, 0, 0); + checkResources(queueSource1WithPartX, 0, 0, 0, 0, 0, 50 * GB, 50, 18 * GB, 18, 6, + 0, 0, 0); + checkResources(parentUserSourceWithPartX, 0, 0, 0, 0, 0, 20 * GB, 20, 18 * GB, 18, + 6, 0, 0, 0); + checkResources(userSourceWithPartX, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0, + 0, 0); + checkResources(userSource1WithPartX, 0, 0, 0, 0, 0, 5 * GB, 5, 18 * GB, 18, 6, 0, + 0, 0); + + MetricsSource partitionSourceY = + partitionSource(metrics1.getMetricsSystem(), partitionY); + + MetricsSource parentQueueSourceWithPartY = + queueSource(metrics1.getMetricsSystem(), partitionY, parentQueueName); + MetricsSource queueSourceWithPartY = + queueSource(metrics1.getMetricsSystem(), partitionY, leafQueueName); + MetricsSource queueSource1WithPartY = + queueSource(metrics1.getMetricsSystem(), partitionY, leafQueueName1); + MetricsSource parentUserSourceWithPartY = userSource(metrics1.getMetricsSystem(), + partitionY, user, parentQueueName); + MetricsSource 
userSourceWithPartY = userSource(metrics1.getMetricsSystem(), + partitionY, user, leafQueueName); + MetricsSource userSource1WithPartY = userSource(metrics1.getMetricsSystem(), + partitionY, user, leafQueueName1); + + checkResources(partitionSourceY, 0, 0, 0, 0, 0, 500 * GB, 500, 24 * GB, 24, + 6, 0, 0, 0); + checkResources(parentQueueSourceWithPartY, 0, 0, 0, 0, 0, 500 * GB, 500, 24 * GB, + 24, 6, 0, 0, 0); + checkResources(queueSourceWithPartY, 0, 0, 0, 0, 0, 400 * GB, 400, 24 * GB, 24, 6, + 0, 0, 0); + checkResources(queueSource1WithPartY, 0, 0, 0, 0, 0, 300 * GB, 300, 24 * GB, 24, 6, + 0, 0, 0); + checkResources(parentUserSourceWithPartY, 0, 0, 0, 0, 0, 50 * GB, 50, 24 * GB, 24, + 6, 0, 0, 0); + checkResources(userSourceWithPartY, 0, 0, 0, 0, 0, 40 * GB, 40, 24 * GB, 24, 6, 0, + 0, 0); + checkResources(userSource1WithPartY, 0, 0, 0, 0, 0, 30 * GB, 30, 24 * GB, 24, 6, 0, + 0, 0); + + metrics1.finishAppAttempt(app.getApplicationId(), app.isPending(), + app.getUser()); + + metrics1.finishApp(user, RMAppState.FINISHED); + } + + /** + * Structure: + * Both queues, q1 & q2 has been configured to run in only 1 partition, x + * UserMetrics has been disabled, hence trying to access the user source + * throws NPE from sources. + * + * root + * / \ + * q1 q2 + * + * @throws Exception + */ + @Test(expected = NullPointerException.class) + public void testSinglePartitionWithSingleLevelQueueMetricsWithoutUserMetrics() + throws Exception { + + String parentQueueName = "root"; + Queue parentQueue = mock(Queue.class); + String user = "alice"; + + QueueMetrics root = QueueMetrics.forQueue("root", null, false, CONF); + when(parentQueue.getMetrics()).thenReturn(root); + when(parentQueue.getQueueName()).thenReturn(parentQueueName); + CSQueueMetrics q1 = + CSQueueMetrics.forQueue("root.q1", parentQueue, false, CONF); + CSQueueMetrics q2 = + CSQueueMetrics.forQueue("root.q2", parentQueue, false, CONF); + + AppSchedulingInfo app = mockApp(user); + + q1.submitApp(user); + q1.submitAppAttempt(user); + + root.setAvailableResourcesToQueue("x", + Resources.createResource(200 * GB, 200)); + + q1.incrPendingResources("x", user, 2, Resource.newInstance(1024, 1)); + + MetricsSource partitionSource = partitionSource(q1.getMetricsSystem(), "x"); + MetricsSource rootQueueSource = + queueSource(q1.getMetricsSystem(), "x", parentQueueName); + MetricsSource q1Source = queueSource(q1.getMetricsSystem(), "x", "root.q1"); + MetricsSource q1UserSource = + userSource(q1.getMetricsSystem(), "x", user, "root.q1"); + + checkResources(partitionSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(rootQueueSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2); + checkResources(q1Source, 0, 0, 0, 0, 0, 2 * GB, 2, 2); + checkResources(q1UserSource, 0, 0, 0, 0, 0, 2 * GB, 2, 2); + + q2.incrPendingResources("x", user, 3, Resource.newInstance(1024, 1)); + MetricsSource q2Source = queueSource(q2.getMetricsSystem(), "x", "root.q2"); + MetricsSource q2UserSource = + userSource(q1.getMetricsSystem(), "x", user, "root.q2"); + + checkResources(partitionSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5); + checkResources(rootQueueSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5); + checkResources(q2Source, 0, 0, 0, 0, 0, 3 * GB, 3, 3); + checkResources(q2UserSource, 0, 0, 0, 0, 0, 3 * GB, 3, 3); + + q1.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser()); + q1.finishApp(user, RMAppState.FINISHED); + } + + public static MetricsSource partitionSource(MetricsSystem ms, + String partition) { + MetricsSource s = + 
ms.getSource(QueueMetrics.pSourceName(partition).toString()); + return s; + } + + public static MetricsSource queueSource(MetricsSystem ms, String partition, + String queue) { + MetricsSource s = ms.getSource(QueueMetrics.pSourceName(partition) + .append(QueueMetrics.qSourceName(queue)).toString()); + return s; + } + + public static MetricsSource userSource(MetricsSystem ms, String partition, + String user, String queue) { + MetricsSource s = ms.getSource(QueueMetrics.pSourceName(partition) + .append(QueueMetrics.qSourceName(queue)).append(",user=") + .append(user).toString()); + return s; + } + + public static void checkResources(MetricsSource source, long allocatedMB, + int allocatedCores, int allocCtnrs, long availableMB, int availableCores, + long pendingMB, int pendingCores, int pendingCtnrs) { + MetricsRecordBuilder rb = getMetrics(source); + assertGauge("AllocatedMB", allocatedMB, rb); + assertGauge("AllocatedVCores", allocatedCores, rb); + assertGauge("AllocatedContainers", allocCtnrs, rb); + assertGauge("AvailableMB", availableMB, rb); + assertGauge("AvailableVCores", availableCores, rb); + assertGauge("PendingMB", pendingMB, rb); + assertGauge("PendingVCores", pendingCores, rb); + assertGauge("PendingContainers", pendingCtnrs, rb); + } + + private static AppSchedulingInfo mockApp(String user) { + AppSchedulingInfo app = mock(AppSchedulingInfo.class); + when(app.getUser()).thenReturn(user); + ApplicationId appId = BuilderUtils.newApplicationId(1, 1); + ApplicationAttemptId id = BuilderUtils.newApplicationAttemptId(appId, 1); + when(app.getApplicationAttemptId()).thenReturn(id); + return app; + } + + public static void checkResources(MetricsSource source, long allocatedMB, + int allocatedCores, int allocCtnrs, long aggreAllocCtnrs, + long aggreReleasedCtnrs, long availableMB, int availableCores, + long pendingMB, int pendingCores, int pendingCtnrs, long reservedMB, + int reservedCores, int reservedCtnrs) { + MetricsRecordBuilder rb = getMetrics(source); + assertGauge("AllocatedMB", allocatedMB, rb); + assertGauge("AllocatedVCores", allocatedCores, rb); + assertGauge("AllocatedContainers", allocCtnrs, rb); + assertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb); + assertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb); + assertGauge("AvailableMB", availableMB, rb); + assertGauge("AvailableVCores", availableCores, rb); + assertGauge("PendingMB", pendingMB, rb); + assertGauge("PendingVCores", pendingCores, rb); + assertGauge("PendingContainers", pendingCtnrs, rb); + assertGauge("ReservedMB", reservedMB, rb); + assertGauge("ReservedVCores", reservedCores, rb); + assertGauge("ReservedContainers", reservedCtnrs, rb); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java index 78f2a43ea7af6..7acfe5da0ec68 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetricsForCustomResources.java @@ -16,8 +16,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; @@ -293,7 +294,7 @@ private void testUpdatePreemptedSeconds(QueueMetricsTestData testData, } private Resource convertPreemptedSecondsToResource(QueueMetrics qm) { - QueueMetricsCustomResource customValues = qm + CustomResourceMetricValue customValues = qm .getAggregatedPreemptedSecondsResources(); MutableCounterLong vcoreSeconds = qm .getAggregateVcoreSecondsPreempted(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index 5ca6d314bca0c..b1080f7d5fea4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -99,7 +100,7 @@ public void testActiveUsersWhenMove() { app.liveContainers.put(container1.getContainerId(), container1); SchedulerNode node = createNode(); app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node, - toSchedulerKey(requestedPriority), container1.getContainer()); + toSchedulerKey(requestedPriority), container1); // Active user count has to decrease from queue2 due to app has NO pending requests assertEquals(0, queue2.getAbstractUsersManager().getNumActiveUsers()); @@ -142,7 +143,7 @@ public void testMove() { app.liveContainers.put(container1.getContainerId(), container1); SchedulerNode node = createNode(); app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH, node, - toSchedulerKey(requestedPriority), container1.getContainer()); + toSchedulerKey(requestedPriority), container1); // Reserved container Priority prio1 = 
Priority.newInstance(1); @@ -320,6 +321,43 @@ public void testAppPercentagesOnswitch() throws Exception { 0.0f); } + @Test + public void testAllResourceUsage() throws Exception { + FifoScheduler scheduler = mock(FifoScheduler.class); + when(scheduler.getClusterResource()).thenReturn(Resource.newInstance(0, 0)); + when(scheduler.getResourceCalculator()) + .thenReturn(new DefaultResourceCalculator()); + + ApplicationAttemptId appAttId = createAppAttemptId(0, 0); + RMContext rmContext = mock(RMContext.class); + when(rmContext.getEpoch()).thenReturn(3L); + when(rmContext.getScheduler()).thenReturn(scheduler); + when(rmContext.getYarnConfiguration()).thenReturn(conf); + + final String user = "user1"; + Queue queue = createQueue("test", null); + SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId, + user, queue, queue.getAbstractUsersManager(), rmContext); + + // Resource request + Resource requestedResource = Resource.newInstance(1536, 2); + app.attemptResourceUsage.incUsed("X", requestedResource); + app.attemptResourceUsage.incUsed("Y", requestedResource); + Resource r2 = Resource.newInstance(1024, 1); + app.attemptResourceUsage.incReserved("X", r2); + app.attemptResourceUsage.incReserved("Y", r2); + + assertTrue("getUsedResources expected " + Resource.newInstance(3072, 4) + + " but was " + app.getResourceUsageReport().getUsedResources(), + Resources.equals(Resource.newInstance(3072, 4), + app.getResourceUsageReport().getUsedResources())); + assertTrue("getReservedResources expected " + Resource.newInstance(2048, 2) + + " but was " + + app.getResourceUsageReport().getReservedResources(), + Resources.equals(Resource.newInstance(2048, 2), + app.getResourceUsageReport().getReservedResources())); + } + @Test public void testSchedulingOpportunityOverflow() throws Exception { ApplicationAttemptId attemptId = createAppAttemptId(0, 0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java index 83a354de5a244..a75be7745fb88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerHealth.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.NodeManager; import org.apache.hadoop.yarn.server.resourcemanager.Application; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; @@ -43,6 +44,7 @@ import java.io.IOException; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.junit.Assume.assumeTrue; public class TestSchedulerHealth { @@ -170,11 +172,11 @@ public void testResourceUpdate() { } private NodeManager registerNode(String hostName, int containerManagerPort, - int httpPort, String rackName, Resource capability) throws IOException, - 
YarnException { + int httpPort, String rackName, Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, rackName, - capability, resourceManager); + capability, resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -200,11 +202,13 @@ public void testCapacitySchedulerAllocation() throws Exception { assumeTrue("This test is only supported on Capacity Scheduler", isCapacityScheduler); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * 1024, 1)); + Resources.createResource(5 * 1024, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -275,15 +279,17 @@ public void testCapacitySchedulerReservation() throws Exception { assumeTrue("This test is only supported on Capacity Scheduler", isCapacityScheduler); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodes String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * 1024, 1)); + Resources.createResource(2 * 1024, 1), mockNodeStatus); String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * 1024, 1)); + Resources.createResource(5 * 1024, 1), mockNodeStatus); nodeUpdate(nm_0); nodeUpdate(nm_1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index f85f06e733bfa..ca3af3d423fce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -42,7 +42,7 @@ import java.util.Map; import java.util.Set; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -104,8 +104,8 @@ import org.junit.Rule; import org.junit.Test; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.junit.rules.ExpectedException; import org.mockito.Mockito; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java index 60e25ed83ace6..229bb0f0a74c6 
100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index bad943c0b585d..84608effa317f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -78,9 +78,9 @@ import org.mockito.ArgumentMatchers; import org.mockito.Mockito; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestApplicationLimits { @@ -184,6 +184,7 @@ private FiCaSchedulerApp getMockApplication(int appId, String user, doReturn(amResource).when(application).getAMResource( CommonNodeLabelsManager.NO_LABEL); when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))).thenCallRealMethod(); + when(application.isRunnable()).thenReturn(true); return application; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java index 9ff066d9ef5fa..a228d254d5ad8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java @@ -73,8 +73,8 @@ import org.mockito.ArgumentMatchers; import org.mockito.Mockito; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; 
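// Editor's sketch (not part of the patch): the import churn in these hunks is the switch from
// unshaded Guava to the copy relocated under org.apache.hadoop.thirdparty; the classes and their
// APIs are unchanged, only the package prefix moves. A minimal, self-contained illustration
// (the class name and the assumption that Hadoop's shaded Guava jar is on the classpath are mine):
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;

public class ShadedGuavaExample {
  public static void main(String[] args) {
    // Same Guava API as before the relocation, reached through the shaded package.
    ImmutableSet<String> partitions = ImmutableSet.of("x", "y");
    System.out.println(partitions.contains("x"));   // prints: true
  }
}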
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestApplicationLimitsByPartition { final static int GB = 1024; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriorityACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriorityACLs.java index b41ba83679880..cf9a01045b991 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriorityACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriorityACLs.java @@ -143,6 +143,7 @@ private void submitAppToRMWithInValidAcl(String submitter, .newInstance(appSubmissionContext); try { submitterClient.submitApplication(submitRequest); + Assert.fail(); } catch (YarnException ex) { Assert.assertTrue(ex.getCause() instanceof RemoteException); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java index 65473b9eea7d8..d6f15446441d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java @@ -22,18 +22,22 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ClusterNodeTracker; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; @@ -47,8 +51,12 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB; +import static org.junit.Assert.assertEquals; /** * Test case for custom resource container allocation. @@ -64,6 +72,9 @@ public class TestCSAllocateCustomResource { private final int g = 1024; + private ClusterNodeTracker nodeTracker; + private ClusterMetrics metrics; + @Before public void setUp() throws Exception { conf = new YarnConfiguration(); @@ -182,4 +193,57 @@ public void testCapacitySchedulerInitWithCustomResourceType() .getResourceValue("yarn.io/gpu")); rm.close(); } + + @Test + public void testClusterMetricsWithGPU() + throws Exception { + metrics = ClusterMetrics.getMetrics(); + // reset resource types + ResourceUtils.resetResourceTypes(); + String resourceTypesFileName = "resource-types-test.xml"; + File source = new File( + conf.getClassLoader().getResource(resourceTypesFileName).getFile()); + resourceTypesFile = new File(source.getParent(), "resource-types.xml"); + FileUtils.copyFile(source, resourceTypesFile); + + CapacitySchedulerConfiguration newConf = + (CapacitySchedulerConfiguration) TestUtils + .getConfigurationWithMultipleQueues(conf); + newConf.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class, ResourceCalculator.class); + //start RM + MockRM rm = new MockRM(newConf); + rm.start(); + + nodeTracker = new ClusterNodeTracker<>(); + MockNodes.resetHostIds(); + Resource nodeResource = Resource.newInstance(4096, 4, + Collections.singletonMap(GPU_URI, 4L)); + List rmNodes = + MockNodes.newNodes(2, 4, nodeResource); + for (RMNode rmNode : rmNodes) { + nodeTracker.addNode(new FiCaSchedulerNode(rmNode, false)); + } + + // Check GPU inc related cluster metrics. + assertEquals("Cluster Capability Memory incorrect", + metrics.getCapabilityMB(), (4096 * 8)); + assertEquals("Cluster Capability Vcores incorrect", + metrics.getCapabilityVirtualCores(), 4 * 8); + assertEquals("Cluster Capability GPUs incorrect", + metrics.getCapabilityGPUs(), 4 * 8); + + for (RMNode rmNode : rmNodes) { + nodeTracker.removeNode(rmNode.getNodeID()); + } + + // Check GPU dec related cluster metrics. 
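// Editor's note (not part of the patch): the capability figures asserted in this test follow
// directly from the mocked node set -- MockNodes.newNodes(2, 4, ...) registers 8 nodes (hence the
// 8x multipliers in the assertions), each advertising 4096 MB, 4 vcores and 4 GPUs:
//   memory : 8 * 4096 MB = 32768 MB
//   vcores : 8 * 4       = 32
//   GPUs   : 8 * 4       = 32
// Once every node has been removed from the ClusterNodeTracker, the same three capability gauges
// are expected to drop back to 0, which is what the assertions below verify.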
+ assertEquals("Cluster Capability Memory incorrect", + metrics.getCapabilityMB(), 0); + assertEquals("Cluster Capability Vcores incorrect", + metrics.getCapabilityVirtualCores(), 0); + assertEquals("Cluster Capability GPUs incorrect", + metrics.getCapabilityGPUs(), 0); + ClusterMetrics.destroy(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java new file mode 100644 index 0000000000000..e3c05a1b7cca5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSMaxRunningAppsEnforcer.java @@ -0,0 +1,278 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager; +import org.apache.hadoop.yarn.util.ControlledClock; +import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.junit.Before; +import org.junit.Test; + +public class TestCSMaxRunningAppsEnforcer { + private CapacitySchedulerQueueManager queueManager; + private CSMaxRunningAppsEnforcer maxAppsEnforcer; + private int appNum; + private ControlledClock clock; + private RMContext rmContext; + private CapacityScheduler scheduler; + private ActivitiesManager activitiesManager; + private CapacitySchedulerConfiguration csConfig; + + @Before + public void setup() throws IOException { + csConfig = new CapacitySchedulerConfiguration(); + rmContext = mock(RMContext.class); + when(rmContext.getYarnConfiguration()).thenReturn(csConfig); + when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<>()); + clock = new ControlledClock(); + scheduler = mock(CapacityScheduler.class); + when(rmContext.getScheduler()).thenReturn(scheduler); + when(scheduler.getConf()).thenReturn(csConfig); + when(scheduler.getConfig()).thenReturn(csConfig); + when(scheduler.getConfiguration()).thenReturn(csConfig); + when(scheduler.getResourceCalculator()).thenReturn( + new DefaultResourceCalculator()); + when(scheduler.getRMContext()).thenReturn(rmContext); + when(scheduler.getClusterResource()) + .thenReturn(Resource.newInstance(16384, 8)); + when(scheduler.getMinimumAllocation()) + .thenReturn(Resource.newInstance(1024, 1)); + when(scheduler.getMinimumResourceCapability()) + .thenReturn(Resource.newInstance(1024, 1)); + activitiesManager = mock(ActivitiesManager.class); + maxAppsEnforcer = new CSMaxRunningAppsEnforcer(scheduler); + appNum = 0; + setupQueues(csConfig); + RMNodeLabelsManager labelManager = mock(RMNodeLabelsManager.class); + AppPriorityACLsManager appPriorityACLManager = + mock(AppPriorityACLsManager.class); + when(rmContext.getNodeLabelManager()).thenReturn(labelManager); + when(labelManager.getResourceByLabel(anyString(), any(Resource.class))) + .thenReturn(Resource.newInstance(16384, 8)); + queueManager = new CapacitySchedulerQueueManager(csConfig, labelManager, + appPriorityACLManager); + queueManager.setCapacitySchedulerContext(scheduler); + queueManager.initializeQueues(csConfig); + } + + private void 
setupQueues(CapacitySchedulerConfiguration config) { + config.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"queue1", "queue2"}); + config.setQueues("root.queue1", new String[] {"subqueue1", "subqueue2"}); + config.setQueues("root.queue1.subqueue1", new String[] {"leaf1"}); + config.setQueues("root.queue1.subqueue2", new String[] {"leaf2"}); + config.setFloat(PREFIX + "root.capacity", 100.0f); + config.setFloat(PREFIX + "root.queue1.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue2.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue1.subqueue1.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue1.subqueue2.capacity", 50.0f); + config.setFloat(PREFIX + "root.queue1.subqueue1.leaf1.capacity", 100.0f); + config.setFloat(PREFIX + "root.queue1.subqueue2.leaf2.capacity", 100.0f); + } + + private FiCaSchedulerApp addApp(LeafQueue queue, String user) { + ApplicationId appId = ApplicationId.newInstance(0, appNum++); + ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 0); + + FiCaSchedulerApp attempt = new FiCaSchedulerApp(attId, + user, queue, queue.getAbstractUsersManager(), + rmContext, Priority.newInstance(0), false, + activitiesManager) { + + private final long startTime = clock.getTime(); + + @Override + public long getStartTime() { + return startTime; + } + }; + + maxAppsEnforcer.checkRunnabilityWithUpdate(attempt); + maxAppsEnforcer.trackApp(attempt); + + queue.submitApplicationAttempt(attempt, attempt.getUser()); + + return attempt; + } + + private void removeApp(FiCaSchedulerApp attempt) { + LeafQueue queue = attempt.getCSLeafQueue(); + queue.finishApplicationAttempt(attempt, queue.getQueuePath()); + maxAppsEnforcer.untrackApp(attempt); + maxAppsEnforcer.updateRunnabilityOnAppRemoval(attempt); + } + + @Test + public void testRemoveDoesNotEnableAnyApp() { + ParentQueue root = + (ParentQueue) queueManager.getRootQueue(); + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + root.setMaxParallelApps(2); + leaf1.setMaxParallelApps(1); + leaf2.setMaxParallelApps(1); + + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testRemoveEnablesAppOnCousinQueue() { + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + ParentQueue queue1 = (ParentQueue) queueManager + .getQueueByFullName("root.queue1"); + queue1.setMaxParallelApps(2); + + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(2, leaf2.getNumRunnableApps()); + assertEquals(0, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testRemoveEnablesOneByQueueOneByUser() { + LeafQueue leaf1 = (LeafQueue) queueManager + 
.getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + leaf1.setMaxParallelApps(2); + //userMaxApps.put("user1", 1); + csConfig.setInt(PREFIX + "user.user1.max-parallel-apps", 1); + + FiCaSchedulerApp app1 = addApp(leaf1, "user1"); + addApp(leaf1, "user2"); + addApp(leaf1, "user3"); + addApp(leaf2, "user1"); + assertEquals(2, leaf1.getNumRunnableApps()); + assertEquals(1, leaf1.getNumNonRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + + removeApp(app1); + assertEquals(2, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(0, leaf1.getNumNonRunnableApps()); + assertEquals(0, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testRemoveEnablingOrderedByStartTime() { + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + ParentQueue queue1 = (ParentQueue) queueManager + .getQueueByFullName("root.queue1"); + queue1.setMaxParallelApps(2); + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + clock.tickSec(20); + addApp(leaf1, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(1, leaf1.getNumNonRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(2, leaf2.getNumRunnableApps()); + assertEquals(0, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testMultipleAppsWaitingOnCousinQueue() { + LeafQueue leaf1 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue1.leaf1"); + LeafQueue leaf2 = (LeafQueue) queueManager + .getQueueByFullName("root.queue1.subqueue2.leaf2"); + ParentQueue queue1 = (ParentQueue) queueManager + .getQueueByFullName("root.queue1"); + queue1.setMaxParallelApps(2); + FiCaSchedulerApp app1 = addApp(leaf1, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + addApp(leaf2, "user"); + assertEquals(1, leaf1.getNumRunnableApps()); + assertEquals(1, leaf2.getNumRunnableApps()); + assertEquals(2, leaf2.getNumNonRunnableApps()); + removeApp(app1); + assertEquals(0, leaf1.getNumRunnableApps()); + assertEquals(2, leaf2.getNumRunnableApps()); + assertEquals(1, leaf2.getNumNonRunnableApps()); + } + + @Test + public void testMultiListStartTimeIteratorEmptyAppLists() { + List<List<FiCaSchedulerApp>> lists = + new ArrayList<List<FiCaSchedulerApp>>(); + lists.add(Arrays.asList(mockAppAttempt(1))); + lists.add(Arrays.asList(mockAppAttempt(2))); + Iterator<FiCaSchedulerApp> iter = + new CSMaxRunningAppsEnforcer.MultiListStartTimeIterator(lists); + assertEquals(1, iter.next().getStartTime()); + assertEquals(2, iter.next().getStartTime()); + } + + private FiCaSchedulerApp mockAppAttempt(long startTime) { + FiCaSchedulerApp schedApp = mock(FiCaSchedulerApp.class); + when(schedApp.getStartTime()).thenReturn(startTime); + return schedApp; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index a746f06f2746c..6b0c42f9b160f 100644 ---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB; @@ -52,8 +53,9 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.service.ServiceStateException; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -189,9 +191,9 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -242,9 +244,10 @@ public void tearDown() throws Exception { private NodeManager registerNode(ResourceManager rm, String hostName, int containerManagerPort, int httpPort, String rackName, - Resource capability) throws IOException, YarnException { + Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, - containerManagerPort, httpPort, rackName, capability, rm); + containerManagerPort, httpPort, rackName, capability, rm, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(rm.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -286,11 +289,11 @@ public void testConfValidation() throws Exception { } private NodeManager registerNode(String hostName, int containerManagerPort, - int httpPort, String rackName, - Resource capability) - throws IOException, YarnException { + int httpPort, String rackName, + Resource capability, NodeStatus nodeStatus) + throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, - rackName, capability, resourceManager); + rackName, capability, resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext() .getRMNodes().get(nm.getNodeId())); @@ -303,17 +306,19 @@ public void testCapacityScheduler() throws Exception { LOG.info("--- START: testCapacityScheduler ---"); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + Resources.createResource(4 * GB, 1), mockNodeStatus); // Register node2 String 
host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + Resources.createResource(2 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -443,11 +448,13 @@ protected RMNodeLabelsManager createNodeLabelManager() { when(mC.getConfigurationProvider()).thenReturn( new LocalConfigurationProvider()); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host0 = "host_0"; NodeManager nm0 = registerNode(rm, host0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(10 * GB, 10)); + Resources.createResource(10 * GB, 10), mockNodeStatus); // ResourceRequest priorities Priority priority0 = Priority.newInstance(0); @@ -545,11 +552,13 @@ protected RMNodeLabelsManager createNodeLabelManager() { when(mC.getConfigurationProvider()).thenReturn( new LocalConfigurationProvider()); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host0 = "host_0"; NodeManager nm0 = registerNode(rm, host0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(10 * GB, 10)); + Resources.createResource(10 * GB, 10), mockNodeStatus); // ResourceRequest priorities Priority priority0 = Priority.newInstance(0); @@ -2097,17 +2106,20 @@ public void testMoveAppSameParent() throws Exception { public void testMoveAppForMoveToQueueWithFreeCap() throws Exception { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + Resources.createResource(4 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + Resources.createResource(2 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -2213,17 +2225,19 @@ public void testMoveAppSuccess() throws Exception { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -2335,11 +2349,13 @@ protected RMNodeLabelsManager createNodeLabelManager() { ResourceScheduler scheduler = resourceManager.getResourceScheduler(); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(6 * GB, 1)); + Resources.createResource(6 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -2383,17 +2399,19 @@ protected RMNodeLabelsManager createNodeLabelManager() { public void testMoveAppQueueMetricsCheck() throws Exception { ResourceScheduler scheduler = 
resourceManager.getResourceScheduler(); + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // Register node2 String host_1 = "host_1"; NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(5 * GB, 1)); + Resources.createResource(5 * GB, 1), mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); @@ -4594,9 +4612,12 @@ private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) { } @Test public void testRemovedNodeDecomissioningNode() throws Exception { + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodemanager NodeManager nm = registerNode("host_decom", 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -4639,10 +4660,14 @@ public void handle(Event event) { ((CapacityScheduler) resourceManager.getResourceScheduler()) .setRMContext(spyContext); ((AsyncDispatcher) mockDispatcher).start(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java index 376a5e8599bf0..59ab077ba4ab2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java index c5eaa4c042b18..6888c01326bad 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.security.Groups; import org.slf4j.Logger; @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels .NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; @@ -541,6 +542,11 @@ protected CapacitySchedulerConfiguration setupSchedulerConfiguration() { } protected MockRM setupSchedulerInstance() throws Exception { + + if (mockRM != null) { + mockRM.stop(); + } + CapacitySchedulerConfiguration conf = setupSchedulerConfiguration(); setupQueueConfiguration(conf); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, @@ -639,16 +645,26 @@ protected void validateContainerLimits( autoCreatedLeafQueue.getMaximumAllocation().getMemorySize()); } - protected void validateInitialQueueEntitlement(CSQueue parentQueue, String - leafQueueName, Map - expectedTotalChildQueueAbsCapacityByLabel, + protected void validateInitialQueueEntitlement(CSQueue parentQueue, + String leafQueueName, + Map expectedTotalChildQueueAbsCapacityByLabel, Set nodeLabels) throws SchedulerDynamicEditException, InterruptedException { - validateInitialQueueEntitlement(cs, parentQueue, leafQueueName, + validateInitialQueueEntitlement(mockRM, cs, parentQueue, leafQueueName, expectedTotalChildQueueAbsCapacityByLabel, nodeLabels); } - protected void validateInitialQueueEntitlement( + protected void validateInitialQueueEntitlement(ResourceManager rm, + CSQueue parentQueue, String leafQueueName, + Map expectedTotalChildQueueAbsCapacityByLabel, + Set nodeLabels) + throws SchedulerDynamicEditException, InterruptedException { + validateInitialQueueEntitlement(rm, + (CapacityScheduler) rm.getResourceScheduler(), parentQueue, + leafQueueName, expectedTotalChildQueueAbsCapacityByLabel, nodeLabels); + } + + protected void validateInitialQueueEntitlement(ResourceManager rm, CapacityScheduler capacityScheduler, CSQueue parentQueue, String leafQueueName, Map expectedTotalChildQueueAbsCapacityByLabel, @@ -661,7 +677,8 @@ protected void validateInitialQueueEntitlement( (GuaranteedOrZeroCapacityOverTimePolicy) autoCreateEnabledParentQueue .getAutoCreatedQueueManagementPolicy(); - AutoCreatedLeafQueue leafQueue = (AutoCreatedLeafQueue) capacityScheduler.getQueue(leafQueueName); + AutoCreatedLeafQueue leafQueue = + (AutoCreatedLeafQueue) capacityScheduler.getQueue(leafQueueName); Map expectedEntitlements = new HashMap<>(); QueueCapacities cap = 
autoCreateEnabledParentQueue.getLeafQueueTemplate() @@ -679,7 +696,8 @@ protected void validateInitialQueueEntitlement( expectedEntitlements.put(label, expectedEntitlement); - validateEffectiveMinResource(leafQueue, label, expectedEntitlements); + validateEffectiveMinResource(rm, capacityScheduler, leafQueue, label, + expectedEntitlements); } } @@ -696,24 +714,24 @@ protected void validateCapacitiesByLabel(ManagedParentQueue .getMaximumCapacity(label), EPSILON); } - protected void validateEffectiveMinResource(CSQueue leafQueue, - String label, Map expectedQueueEntitlements) { + protected void validateEffectiveMinResource(ResourceManager rm, + CapacityScheduler cs, CSQueue leafQueue, String label, + Map expectedQueueEntitlements) { ManagedParentQueue parentQueue = (ManagedParentQueue) leafQueue.getParent(); - Resource resourceByLabel = mockRM.getRMContext().getNodeLabelManager(). - getResourceByLabel(label, cs.getClusterResource()); + Resource resourceByLabel = rm.getRMContext().getNodeLabelManager() + .getResourceByLabel(label, cs.getClusterResource()); Resource effMinCapacity = Resources.multiply(resourceByLabel, - expectedQueueEntitlements.get(label).getCapacity() * parentQueue - .getQueueCapacities().getAbsoluteCapacity(label)); + expectedQueueEntitlements.get(label).getCapacity() + * parentQueue.getQueueCapacities().getAbsoluteCapacity(label)); assertEquals(effMinCapacity, Resources.multiply(resourceByLabel, leafQueue.getQueueCapacities().getAbsoluteCapacity(label))); assertEquals(effMinCapacity, leafQueue.getEffectiveCapacity(label)); if (leafQueue.getQueueCapacities().getAbsoluteCapacity(label) > 0) { - assertTrue(Resources - .greaterThan(cs.getResourceCalculator(), cs.getClusterResource(), - effMinCapacity, Resources.none())); - } else{ + assertTrue(Resources.greaterThan(cs.getResourceCalculator(), + cs.getClusterResource(), effMinCapacity, Resources.none())); + } else { assertTrue(Resources.equals(effMinCapacity, Resources.none())); } } @@ -824,7 +842,7 @@ private void validateQueueEntitlementChanges(AutoCreatedLeafQueue leafQueue, updatedQueueTemplate.getQueueCapacities().getMaximumCapacity (label)); assertEquals(expectedQueueEntitlements.get(label), newEntitlement); - validateEffectiveMinResource(leafQueue, label, + validateEffectiveMinResource(mockRM, cs, leafQueue, label, expectedQueueEntitlements); } found = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java index a98abb9a004c2..596cca1402fa2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java @@ -19,22 +19,27 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import 
org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.placement .ApplicationPlacementContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -84,7 +89,6 @@ import static org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.CURRENT_USER_MAPPING; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueueUtils.EPSILON; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -162,6 +166,35 @@ public void testAutoCreateLeafQueueCreation() throws Exception { } } + @Test(timeout = 20000) + public void testAutoCreateLeafQueueCreationUsingFullParentPath() + throws Exception { + + try { + setupGroupQueueMappings("root.d", cs.getConfiguration(), "%user"); + cs.reinitialize(cs.getConfiguration(), mockRM.getRMContext()); + + submitApp(mockRM, cs.getQueue("d"), TEST_GROUPUSER, TEST_GROUPUSER, 1, 1); + AutoCreatedLeafQueue autoCreatedLeafQueue = + (AutoCreatedLeafQueue) cs.getQueue(TEST_GROUPUSER); + ManagedParentQueue parentQueue = (ManagedParentQueue) cs.getQueue("d"); + assertEquals(parentQueue, autoCreatedLeafQueue.getParent()); + + Map expectedChildQueueAbsCapacity = + new HashMap() {{ + put(NO_LABEL, 0.02f); + }}; + + validateInitialQueueEntitlement(parentQueue, TEST_GROUPUSER, + expectedChildQueueAbsCapacity, + new HashSet() {{ add(NO_LABEL); }}); + + } finally { + cleanupQueue(USER0); + cleanupQueue(TEST_GROUPUSER); + } + } + @Test public void testReinitializeStoppedAutoCreatedLeafQueue() throws Exception { try { @@ -641,7 +674,7 @@ public void testClusterResourceUpdationOnAutoCreatedLeafQueues() throws submitApp(newMockRM, parentQueue, USER1, USER1, 1, 1); Map expectedAbsChildQueueCapacity = populateExpectedAbsCapacityByLabelForParentQueue(1); - validateInitialQueueEntitlement(newCS, parentQueue, USER1, + validateInitialQueueEntitlement(newMockRM, newCS, parentQueue, USER1, expectedAbsChildQueueCapacity, accessibleNodeLabelsOnC); //submit another app2 as USER2 @@ -649,7 +682,7 @@ public void testClusterResourceUpdationOnAutoCreatedLeafQueues() throws 1); expectedAbsChildQueueCapacity = populateExpectedAbsCapacityByLabelForParentQueue(2); - validateInitialQueueEntitlement(newCS, parentQueue, USER2, + validateInitialQueueEntitlement(newMockRM, newCS, parentQueue, USER2, expectedAbsChildQueueCapacity, 
accessibleNodeLabelsOnC); //validate total activated abs capacity remains the same @@ -744,7 +777,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() Map expectedChildQueueAbsCapacity = populateExpectedAbsCapacityByLabelForParentQueue(1); - validateInitialQueueEntitlement(newCS, parentQueue, USER1, + validateInitialQueueEntitlement(newMockRM, newCS, parentQueue, USER1, expectedChildQueueAbsCapacity, accessibleNodeLabelsOnC); //submit another app2 as USER2 @@ -753,7 +786,7 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() 1); expectedChildQueueAbsCapacity = populateExpectedAbsCapacityByLabelForParentQueue(2); - validateInitialQueueEntitlement(newCS, parentQueue, USER2, + validateInitialQueueEntitlement(newMockRM, newCS, parentQueue, USER2, expectedChildQueueAbsCapacity, accessibleNodeLabelsOnC); //update parent queue capacity @@ -810,4 +843,62 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues() } } } + + @Test + public void testDynamicAutoQueueCreationWithTags() + throws Exception { + MockRM rm = null; + try { + CapacitySchedulerConfiguration csConf + = new CapacitySchedulerConfiguration(); + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"a", "b"}); + csConf.setCapacity("root.a", 90); + csConf.setCapacity("root.b", 10); + csConf.setAutoCreateChildQueueEnabled("root.a", true); + csConf.setAutoCreatedLeafQueueConfigCapacity("root.a", 50); + csConf.setAutoCreatedLeafQueueConfigMaxCapacity("root.a", 100); + csConf.setAcl("root.a", QueueACL.ADMINISTER_QUEUE, "*"); + csConf.setAcl("root.a", QueueACL.SUBMIT_APPLICATIONS, "*"); + csConf.setBoolean(YarnConfiguration + .APPLICATION_TAG_BASED_PLACEMENT_ENABLED, true); + csConf.setStrings(YarnConfiguration + .APPLICATION_TAG_BASED_PLACEMENT_USER_WHITELIST, "hadoop"); + csConf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, + "u:%user:root.a.%user"); + + RMNodeLabelsManager mgr = new NullRMNodeLabelsManager(); + mgr.init(csConf); + rm = new MockRM(csConf) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + rm.start(); + MockNM nm = rm.registerNode("127.0.0.1:1234", 16 * GB); + + MockRMAppSubmissionData data = + MockRMAppSubmissionData.Builder.createWithMemory(GB, rm) + .withAppName("apptodynamicqueue") + .withUser("hadoop") + .withAcls(null) + .withUnmanagedAM(false) + .withApplicationTags(Sets.newHashSet("userid=testuser")) + .build(); + RMApp app = MockRMAppSubmitter.submit(rm, data); + MockRM.launchAndRegisterAM(app, rm, nm); + nm.nodeHeartbeat(true); + + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + CSQueue queue = cs.getQueue("root.a.testuser"); + assertNotNull("Leaf queue has not been auto-created", queue); + assertEquals("Number of running applications", 1, + queue.getNumApplications()); + } finally { + if (rm != null) { + rm.close(); + } + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java index ba05e248cac16..fceef183d7261 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java new file mode 100644 index 0000000000000..38edda64462e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMaxParallelApps.java @@ -0,0 +1,312 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmissionData; +import org.apache.hadoop.yarn.server.resourcemanager.MockRMAppSubmitter; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; + +public class TestCapacitySchedulerMaxParallelApps { + private CapacitySchedulerConfiguration conf; + private MockRM rm; + private MockNM nm1; + + private RMApp app1; + private MockAM am1; + private RMApp app2; + private MockAM am2; + private RMApp app3; + private RMAppAttempt attempt3; + private RMApp app4; + private RMAppAttempt attempt4; + + private ParentQueue rootQueue; + private LeafQueue defaultQueue; + + @Before + public void setUp() { + CapacitySchedulerConfiguration config = + new CapacitySchedulerConfiguration(); + config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, + DominantResourceCalculator.class.getName()); + + conf = new CapacitySchedulerConfiguration(config); + } + + @After + public void after() { + if (rm != null) { + rm.stop(); + } + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsQueueSetting() throws Exception { + conf.setInt("yarn.scheduler.capacity.root.default.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsDefaultQueueSetting() + throws Exception { + conf.setInt("yarn.scheduler.capacity.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsUserSetting() throws Exception { + conf.setInt("yarn.scheduler.capacity.user.testuser.max-parallel-apps", 2); + executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsExceedsDefaultUserSetting() throws Exception { + conf.setInt("yarn.scheduler.capacity.user.max-parallel-apps", 2); + 
executeCommonStepsAndChecks(); + testWhenSettingsExceeded(); + } + + @Test(timeout = 30000) + public void testMaxParallelAppsWhenReloadingConfig() throws Exception { + conf.setInt("yarn.scheduler.capacity.root.default.max-parallel-apps", 2); + + executeCommonStepsAndChecks(); + + RMContext rmContext = rm.getRMContext(); + // Disable parallel apps setting + max out AM percent + conf.unset("yarn.scheduler.capacity.root.default.max-parallel-apps"); + conf.setFloat(PREFIX + "maximum-am-resource-percent", 1.0f); + CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); + cs.reinitialize(conf, rmContext); + + // Both app #3 and app #4 should transition to RUNNABLE + launchAMandWaitForRunning(app3, attempt3, nm1); + launchAMandWaitForRunning(app4, attempt4, nm1); + verifyRunningAndAcceptedApps(4, 0); + } + + @Test(timeout = 30000) + public void testMaxAppsReachedWithNonRunnableApps() throws Exception { + conf.setInt("yarn.scheduler.capacity.root.default.max-parallel-apps", 2); + conf.setInt("yarn.scheduler.capacity.root.default.maximum-applications", 4); + executeCommonStepsAndChecks(); + + RMApp app5 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app5") + .withUser("testuser") + .withQueue("default") + .withWaitForAppAcceptedState(false) + .build()); + + rm.waitForState(app5.getApplicationId(), RMAppState.FAILED); + } + + private void executeCommonStepsAndChecks() throws Exception { + rm = new MockRM(conf); + rm.start(); + + nm1 = rm.registerNode("h1:1234", 4096, 8); + rm.registerNode("h2:1234", 4096, 8); + rm.registerNode("h3:1234", 4096, 8); + + rm.drainEvents(); + + app1 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app1") + .withUser("testuser") + .withQueue("default") + .build()); + + am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + + app2 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app2") + .withUser("testuser") + .withQueue("default") + .build()); + am2 = MockRM.launchAndRegisterAM(app2, rm, nm1); + + app3 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app3") + .withUser("testuser") + .withQueue("default") + .build()); + attempt3 = MockRM.waitForAttemptScheduled(app3, rm); + + app4 = MockRMAppSubmitter.submit(rm, + MockRMAppSubmissionData.Builder.createWithMemory(512, rm) + .withAppName("app4") + .withUser("testuser") + .withQueue("default") + .build()); + attempt4 = MockRM.waitForAttemptScheduled(app4, rm); + + // Check that app attempt #3 and #4 are non-runnable + rootQueue = getRootQueue(); + defaultQueue = getDefaultQueue(); + Set nonRunnables = + Sets.newHashSet( + attempt3.getAppAttemptId(), + attempt4.getAppAttemptId()); + verifyRunnableAppsInParent(rootQueue, 2); + verifyRunnableAppsInLeaf(defaultQueue, 2, nonRunnables); + verifyRunningAndAcceptedApps(2, 2); + } + + private void testWhenSettingsExceeded() throws Exception { + // Stop app #1 + unregisterAMandWaitForFinish(app1, am1, nm1); + + // Launch app #3 + launchAMandWaitForRunning(app3, attempt3, nm1); + + // Check that attempt #4 is still non-runnable + verifyRunnableAppsInParent(rootQueue, 2); + verifyRunnableAppsInLeaf(defaultQueue, 2, + Collections.singleton(attempt4.getAppAttemptId())); + verifyRunningAndAcceptedApps(2, 1); + + // Stop app #2 + unregisterAMandWaitForFinish(app2, am2, nm1); + + // Launch app #4 + launchAMandWaitForRunning(app4, attempt4, nm1); + 
verifyRunnableAppsInParent(rootQueue, 2); + verifyRunnableAppsInLeaf(defaultQueue, 2, + Collections.emptySet()); + verifyRunningAndAcceptedApps(2, 0); + } + + @SuppressWarnings("checkstyle:hiddenfield") + private LeafQueue getDefaultQueue() { + CSQueue defaultQueue = + ((CapacityScheduler) rm.getResourceScheduler()).getQueue("default"); + + return (LeafQueue) defaultQueue; + } + + private ParentQueue getRootQueue() { + CSQueue root = + ((CapacityScheduler) rm.getResourceScheduler()).getQueue("root"); + + return (ParentQueue) root; + } + + private void verifyRunnableAppsInParent(ParentQueue queue, + int expectedRunnable) { + assertEquals("Num of runnable apps", expectedRunnable, + queue.getNumRunnableApps()); + } + + private void verifyRunnableAppsInLeaf(LeafQueue queue, int expectedRunnable, + Set nonRunnableIds) { + assertEquals("Num of runnable apps", expectedRunnable, + queue.getNumRunnableApps()); + + queue.getCopyOfNonRunnableAppSchedulables() + .stream() + .map(fca -> fca.getApplicationAttemptId()) + .forEach(id -> assertTrue(id + " not found as non-runnable", + nonRunnableIds.contains(id))); + } + + private void verifyRunningAndAcceptedApps(int expectedRunning, + int expectedAccepted) throws YarnException { + GetApplicationsRequest request = GetApplicationsRequest.newInstance(); + + GetApplicationsResponse resp = + rm.getClientRMService().getApplications(request); + + List apps = resp.getApplicationList(); + + long runningCount = apps + .stream() + .filter(report -> + report.getYarnApplicationState() == YarnApplicationState.RUNNING) + .count(); + + long acceptedCount = apps + .stream() + .filter(report -> + report.getYarnApplicationState() == YarnApplicationState.ACCEPTED) + .count(); + + assertEquals("Running apps count", expectedRunning, runningCount); + assertEquals("Accepted apps count", expectedAccepted, acceptedCount); + } + + private void unregisterAMandWaitForFinish(RMApp app, MockAM am, MockNM nm) + throws Exception { + am.unregisterAppAttempt(); + nm.nodeHeartbeat(app.getCurrentAppAttempt().getAppAttemptId(), 1, + ContainerState.COMPLETE); + rm.waitForState(app.getCurrentAppAttempt().getAppAttemptId(), + RMAppAttemptState.FINISHED); + } + + @SuppressWarnings("rawtypes") + private MockAM launchAMandWaitForRunning(RMApp app, RMAppAttempt attempt, + MockNM nm) throws Exception { + nm.nodeHeartbeat(true); + ((AbstractYarnScheduler)rm.getResourceScheduler()).update(); + rm.drainEvents(); + nm.nodeHeartbeat(true); + MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId()); + am.registerAppAttempt(); + rm.waitForState(app.getApplicationId(), RMAppState.RUNNING); + + return am; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java index fa85ca7813188..bb2cbfdba148a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java @@ -22,6 +22,7 @@ import 
java.util.Iterator; import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; @@ -223,6 +224,7 @@ public void testExcessReservationWillBeUnreserved() throws Exception { CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); LeafQueue leafQueue = (LeafQueue) cs.getQueue("default"); FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId()); @@ -234,12 +236,13 @@ public void testExcessReservationWillBeUnreserved() throws Exception { * after its ask has been cancelled when used capacity of root queue is 1. */ // Ask a container with 6GB memory size for app1, - // nm1 will reserve a container for app1 + // nm2 will reserve a container for app1 + // Last Node from Node Iterator will be RESERVED am1.allocate("*", 6 * GB, 1, new ArrayList<>()); - cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); // Check containers of app1 and app2. - Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + Assert.assertNotNull(cs.getNode(nm2.getNodeId()).getReservedContainer()); Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); Assert.assertEquals(1, schedulerApp1.getReservedContainers().size()); Assert.assertEquals(1, schedulerApp2.getLiveContainers().size()); @@ -324,12 +327,13 @@ public void testAllocateForReservedContainer() throws Exception { * after node has sufficient resource. */ // Ask a container with 6GB memory size for app2, - // nm1 will reserve a container for app2 + // nm2 will reserve a container for app2 + // Last Node from Node Iterator will be RESERVED am2.allocate("*", 6 * GB, 1, new ArrayList<>()); cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); // Check containers of app1 and app2. 
- Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); + Assert.assertNotNull(cs.getNode(nm2.getNodeId()).getReservedContainer()); Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); Assert.assertEquals(1, schedulerApp2.getLiveContainers().size()); Assert.assertEquals(1, schedulerApp2.getReservedContainers().size()); @@ -344,4 +348,100 @@ public void testAllocateForReservedContainer() throws Exception { rm1.close(); } + + @Test(timeout=30000) + public void testAllocateOfReservedContainerFromAnotherNode() + throws Exception { + CapacitySchedulerConfiguration newConf = + new CapacitySchedulerConfiguration(conf); + newConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER, + YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER); + newConf.setInt(CapacitySchedulerConfiguration.MULTI_NODE_SORTING_POLICY_NAME + + ".resource-based.sorting-interval.ms", 0); + newConf.setMaximumApplicationMasterResourcePerQueuePercent("root.default", + 1.0f); + MockRM rm1 = new MockRM(newConf); + + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 12 * GB, 2); + MockNM nm2 = rm1.registerNode("h2:1234", 12 * GB, 2); + + // launch an app1 to queue, AM container will be launched in nm1 + RMApp app1 = MockRMAppSubmitter.submit(rm1, + MockRMAppSubmissionData.Builder.createWithMemory(8 * GB, rm1) + .withAppName("app") + .withUser("user") + .withAcls(null) + .withQueue("default") + .build()); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // launch another app2 to queue, AM container will be launched in nm2 + RMApp app2 = MockRMAppSubmitter.submit(rm1, + MockRMAppSubmissionData.Builder.createWithMemory(8 * GB, rm1) + .withAppName("app") + .withUser("user") + .withAcls(null) + .withQueue("default") + .build()); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + + // Reserve a Container for app3 + RMApp app3 = MockRMAppSubmitter.submit(rm1, + MockRMAppSubmissionData.Builder.createWithMemory(8 * GB, rm1) + .withAppName("app") + .withUser("user") + .withAcls(null) + .withQueue("default") + .build()); + + final AtomicBoolean result = new AtomicBoolean(false); + Thread t = new Thread() { + public void run() { + try { + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1); + result.set(true); + } catch (Exception e) { + Assert.fail("Failed to allocate the reserved container"); + } + } + }; + t.start(); + Thread.sleep(1000); + + // Validate if app3 has got RESERVED container + FiCaSchedulerApp schedulerApp = + cs.getApplicationAttempt(app3.getCurrentAppAttempt().getAppAttemptId()); + Assert.assertEquals("App3 failed to get reserved container", 1, + schedulerApp.getReservedContainers().size()); + + // Free the Space on other node where Reservation has not happened + if (cs.getNode(rmNode1.getNodeID()).getReservedContainer() != null) { + rm1.killApp(app2.getApplicationId()); + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } else { + rm1.killApp(app1.getApplicationId()); + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + + // Check if Reserved AM of app3 gets allocated in + // node where space available + while (!result.get()) { + Thread.sleep(100); + } + + // Validate release of reserved containers + schedulerApp = + cs.getApplicationAttempt(app3.getCurrentAppAttempt().getAppAttemptId()); + Assert.assertEquals("App3 
failed to release Reserved container", 0, + schedulerApp.getReservedContainers().size()); + Assert.assertNull(cs.getNode(rmNode1.getNodeID()).getReservedContainer()); + Assert.assertNull(cs.getNode(rmNode2.getNodeID()).getReservedContainer()); + + rm1.close(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java index 5ca6aceb92e52..d7f81c46260fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java @@ -54,9 +54,9 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestCapacitySchedulerNodeLabelUpdate { private final int GB = 1024; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java index c58cf5480b38d..b71fe063927ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java @@ -41,11 +41,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import 
org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceUtils; @@ -57,6 +59,7 @@ import java.util.HashMap; import java.util.Map; import java.util.PriorityQueue; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES; import static org.junit.Assert.assertEquals; @@ -72,6 +75,29 @@ private String getResourceName(int idx) { return "resource-" + idx; } + public static class CapacitySchedulerPerf extends CapacityScheduler { + volatile boolean enable = false; + AtomicLong count = new AtomicLong(0); + + public CapacitySchedulerPerf() { + super(); + } + + @Override + CSAssignment allocateContainersToNode( + CandidateNodeSet candidates, + boolean withNodeHeartbeat) { + CSAssignment retVal = super.allocateContainersToNode(candidates, + withNodeHeartbeat); + + if (enable) { + count.incrementAndGet(); + } + + return retVal; + } + } + // This test is run only when when -DRunCapacitySchedulerPerfTests=true is set // on the command line. In addition, this test has tunables for the following: // Number of queues: -DNumberOfQueues (default=100) @@ -88,6 +114,9 @@ private void testUserLimitThroughputWithNumberOfResourceTypes( throws Exception { Assume.assumeTrue(Boolean.valueOf( System.getProperty("RunCapacitySchedulerPerfTests"))); + int numThreads = Integer.valueOf(System.getProperty( + "CapacitySchedulerPerfTestsNumThreads", "0")); + if (numOfResourceTypes > 2) { // Initialize resource map Map riMap = new HashMap<>(); @@ -112,13 +141,30 @@ private void testUserLimitThroughputWithNumberOfResourceTypes( CapacitySchedulerConfiguration csconf = createCSConfWithManyQueues(numQueues); + if (numThreads > 0) { + csconf.setScheduleAynschronously(true); + csconf.setInt( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD, + numThreads); + csconf.setLong( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX + + ".scheduling-interval-ms", 0); + } YarnConfiguration conf = new YarnConfiguration(csconf); // Don't reset resource types since we have already configured resource // types conf.setBoolean(TEST_CONF_RESET_RESOURCE_TYPES, false); - conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, - ResourceScheduler.class); + + if (numThreads > 0) { + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacitySchedulerPerf.class, + ResourceScheduler.class); + // avoid getting skipped (see CapacityScheduler.shouldSkipNodeSchedule) + conf.setLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 600000); + } else { + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + } MockRM rm = new MockRM(conf); rm.start(); @@ -189,6 +235,13 @@ private void testUserLimitThroughputWithNumberOfResourceTypes( RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + if (numThreads > 0) { + // disable async scheduling threads + for (CapacityScheduler.AsyncScheduleThread t : cs.asyncSchedulerThreads) { + t.suspendSchedule(); + } + } + FiCaSchedulerApp[] fiCaApps = new FiCaSchedulerApp[totalApps]; for (int i=0;i limit, not used >= limit - cs.handle(new NodeUpdateSchedulerEvent(node)); - cs.handle(new NodeUpdateSchedulerEvent(node2)); + if (numThreads > 0) { + // enable async scheduling threads + for (CapacityScheduler.AsyncScheduleThread t : cs.asyncSchedulerThreads) { + t.beginSchedule(); + } - // make sure only the extra apps have allocated containers - for (int 
i=0;i limit, not used >= limit + cs.handle(new NodeUpdateSchedulerEvent(node)); + cs.handle(new NodeUpdateSchedulerEvent(node2)); + + // make sure only the extra apps have allocated containers + for (int i=0;i queue = new PriorityQueue<>(topn, - Collections.reverseOrder()); - - long n = Time.monotonicNow(); - long timespent = 0; - for (int i = 0; i < iterations; i+=2) { - if (i > 0 && i % printInterval == 0){ - long ts = (Time.monotonicNow() - n); - if (queue.size() < topn) { - queue.offer(ts); - } else { - Long last = queue.peek(); - if (last > ts) { - queue.poll(); + + if (numThreads > 0) { + System.out.println("Starting now"); + ((CapacitySchedulerPerf) cs).enable = true; + long start = Time.monotonicNow(); + Thread.sleep(60000); + long end = Time.monotonicNow(); + ((CapacitySchedulerPerf) cs).enable = false; + long numOps = ((CapacitySchedulerPerf) cs).count.get(); + System.out.println("Number of operations: " + numOps); + System.out.println("Time taken: " + (end - start) + " ms"); + System.out.println("" + (numOps * 1000 / (end - start)) + + " ops / second"); + } else { + final int topn = 20; + final int iterations = 2000000; + final int printInterval = 20000; + final float numerator = 1000.0f * printInterval; + PriorityQueue queue = new PriorityQueue<>(topn, + Collections.reverseOrder()); + + long n = Time.monotonicNow(); + long timespent = 0; + for (int i = 0; i < iterations; i+=2) { + if (i > 0 && i % printInterval == 0){ + long ts = (Time.monotonicNow() - n); + if (queue.size() < topn) { queue.offer(ts); + } else { + Long last = queue.peek(); + if (last > ts) { + queue.poll(); + queue.offer(ts); + } } + System.out.println(i + " " + (numerator / ts)); + n = Time.monotonicNow(); } - System.out.println(i + " " + (numerator / ts)); - n= Time.monotonicNow(); + cs.handle(new NodeUpdateSchedulerEvent(node)); + cs.handle(new NodeUpdateSchedulerEvent(node2)); } - cs.handle(new NodeUpdateSchedulerEvent(node)); - cs.handle(new NodeUpdateSchedulerEvent(node2)); - } - timespent=0; - int entries = queue.size(); - while(queue.size() > 0){ - long l = queue.poll(); - timespent += l; + timespent = 0; + int entries = queue.size(); + while (queue.size() > 0) { + long l = queue.poll(); + timespent += l; + } + System.out.println("#ResourceTypes = " + numOfResourceTypes + + ". Avg of fastest " + entries + + ": " + numerator / (timespent / entries) + " ops/sec of " + + appCount + " apps on " + pctActiveQueues + "% of " + numQueues + + " queues."); } - System.out.println( - "#ResourceTypes = " + numOfResourceTypes + ". 
Avg of fastest " + entries - + ": " + numerator / (timespent / entries) + " ops/sec of " - + appCount + " apps on " + pctActiveQueues + "% of " + numQueues - + " queues."); - // make sure only the extra apps have allocated containers - for (int i=0;i 0) { + // count the number of apps with allocated containers + int numNotPending = 0; + for (int i = 0; i < totalApps; i++) { + boolean pending = fiCaApps[i].getAppSchedulingInfo().isPending(); + if (!pending) { + numNotPending++; + assertEquals(0, + fiCaApps[i].getTotalPendingRequestsPerPartition().size()); + } else { + assertEquals(1*GB, + fiCaApps[i].getTotalPendingRequestsPerPartition() + .get(RMNodeLabelsManager.NO_LABEL).getMemorySize()); + } + } + + // make sure only extra apps have allocated containers + assertEquals(activeQueues, numNotPending); + } else { + // make sure only the extra apps have allocated containers + for (int i = 0; i < totalApps; i++) { + boolean pending = fiCaApps[i].getAppSchedulingInfo().isPending(); + if (i < activeQueues) { + assertFalse(pending); + assertEquals(0, + fiCaApps[i].getTotalPendingRequestsPerPartition().size()); + } else { + assertTrue(pending); + assertEquals(1 * GB, + fiCaApps[i].getTotalPendingRequestsPerPartition() + .get(RMNodeLabelsManager.NO_LABEL).getMemorySize()); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java index 9eeb9b4acf314..bb5b790fe2fc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueACLs.java @@ -17,15 +17,22 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.io.IOException; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.QueueACLsTestBase; +import org.junit.Test; public class TestCapacitySchedulerQueueACLs extends QueueACLsTestBase { @Override @@ -132,6 +139,7 @@ public void updateConfigWithDAndD1Queues(String rootAcl, String queueDAcl, .reinitialize(csConf, resourceManager.getRMContext()); } + private void setQueueCapacity(CapacitySchedulerConfiguration csConf, float capacity, String queuePath) { csConf.setCapacity(queuePath, capacity); @@ -142,4 +150,38 @@ private void setAdminAndSubmitACL(CapacitySchedulerConfiguration csConf, csConf.setAcl(queuePath, QueueACL.ADMINISTER_QUEUE, queueAcl); csConf.setAcl(queuePath, QueueACL.SUBMIT_APPLICATIONS, queueAcl); } + + @Test + public void 
testCheckAccessForUserWithOnlyLeafNameProvided() { + testCheckAccess(false, "dynamicQueue"); + } + + @Test + public void testCheckAccessForUserWithFullPathProvided() { + testCheckAccess(true, "root.users.dynamicQueue"); + } + + @Test + public void testCheckAccessForRootQueue() { + testCheckAccess(false, "root"); + } + + private void testCheckAccess(boolean expectedResult, String queueName) { + CapacitySchedulerQueueManager qm = + mock(CapacitySchedulerQueueManager.class); + CSQueue root = mock(ParentQueue.class); + CSQueue users = mock(ManagedParentQueue.class); + when(qm.getQueue("root")).thenReturn(root); + when(qm.getQueue("root.users")).thenReturn(users); + when(users.hasAccess(any(QueueACL.class), + any(UserGroupInformation.class))).thenReturn(true); + UserGroupInformation mockUGI = mock(UserGroupInformation.class); + + CapacityScheduler cs = + (CapacityScheduler) resourceManager.getResourceScheduler(); + cs.setQueueManager(qm); + + assertEquals("checkAccess() failed", expectedResult, + cs.checkAccess(mockUGI, QueueACL.ADMINISTER_QUEUE, queueName)); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java index adad39686471f..5beda2522561a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java @@ -29,7 +29,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.MappingType; import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMapping.QueueMappingBuilder; -import org.apache.hadoop.yarn.server.resourcemanager.placement.QueueMappingEntity; import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SimpleGroupsMapping; @@ -84,17 +83,20 @@ public static CapacitySchedulerConfiguration setupQueueMappingsForRules( existingMappingsForUG.addAll(queueMappingsForUG); conf.setQueueMappings(existingMappingsForUG); - List existingMappingsForAN = + List existingMappingsForAN = conf.getQueueMappingEntity(QUEUE_MAPPING_NAME); //set queue mapping - List queueMappingsForAN = + List queueMappingsForAN = new ArrayList<>(); for (int i = 0; i < sourceIds.length; i++) { //Set C as parent queue name for auto queue creation - QueueMappingEntity queueMapping = - new QueueMappingEntity(USER + sourceIds[i], - getQueueMapping(parentQueue, USER + sourceIds[i])); + QueueMapping queueMapping = QueueMapping.QueueMappingBuilder.create() + .type(MappingType.APPLICATION) + .source(USER + sourceIds[i]) + .queue(getQueueMapping(parentQueue, USER + sourceIds[i])) + .build(); + queueMappingsForAN.add(queueMapping); } @@ -206,7 +208,7 @@ public void 
testNestedUserQueueWithStaticParentQueue() throws Exception { ApplicationPlacementContext ctx2 = r.getPlacementForApp(asc, "user2"); assertEquals("Queue", "user2", ctx2.getQueue()); - assertEquals("Queue", "c", ctx2.getParentQueue()); + assertEquals("Queue", "root.c", ctx2.getParentQueue()); } finally { if(mockRM != null) { mockRM.close(); @@ -398,7 +400,7 @@ public void testDynamicPrimaryGroupQueue() throws Exception { // u:user2:%primary_group QueueMapping userQueueMapping2 = QueueMappingBuilder.create() .type(QueueMapping.MappingType.USER) - .source("user2") + .source("a1") .queue("%primary_group") .build(); @@ -430,8 +432,8 @@ public void testDynamicPrimaryGroupQueue() throws Exception { ApplicationPlacementContext ctx = r.getPlacementForApp(asc, "user1"); assertEquals("Queue", "b1", ctx.getQueue()); - ApplicationPlacementContext ctx1 = r.getPlacementForApp(asc, "user2"); - assertEquals("Queue", "user2group", ctx1.getQueue()); + ApplicationPlacementContext ctx1 = r.getPlacementForApp(asc, "a1"); + assertEquals("Queue", "a1group", ctx1.getQueue()); } finally { if (mockRM != null) { mockRM.close(); @@ -467,14 +469,14 @@ public void testFixedUserWithDynamicGroupQueue() throws Exception { // u:user2:%primary_group QueueMapping userQueueMapping2 = QueueMappingBuilder.create() .type(QueueMapping.MappingType.USER) - .source("user2") + .source("a1") .queue("%primary_group") .build(); // u:b4:%secondary_group QueueMapping userQueueMapping3 = QueueMappingBuilder.create() .type(QueueMapping.MappingType.USER) - .source("b4") + .source("e") .queue("%secondary_group") .build(); @@ -507,11 +509,11 @@ public void testFixedUserWithDynamicGroupQueue() throws Exception { ApplicationPlacementContext ctx = r.getPlacementForApp(asc, "user1"); assertEquals("Queue", "b1", ctx.getQueue()); - ApplicationPlacementContext ctx1 = r.getPlacementForApp(asc, "user2"); - assertEquals("Queue", "user2group", ctx1.getQueue()); + ApplicationPlacementContext ctx1 = r.getPlacementForApp(asc, "a1"); + assertEquals("Queue", "a1group", ctx1.getQueue()); - ApplicationPlacementContext ctx2 = r.getPlacementForApp(asc, "b4"); - assertEquals("Queue", "b4subgroup1", ctx2.getQueue()); + ApplicationPlacementContext ctx2 = r.getPlacementForApp(asc, "e"); + assertEquals("Queue", "esubgroup1", ctx2.getQueue()); } finally { if (mockRM != null) { mockRM.close(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java index 3007a5a007cf5..acb5489bf50ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; 
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java index ab094f5f4c785..ce79528c2599a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java index 62b43e8e3bc97..98cffda51cc81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java @@ -60,7 +60,7 @@ import static org.junit.Assert.assertEquals; import org.junit.Test; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWorkflowPriorityMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWorkflowPriorityMapping.java index 044f5ab2bf62b..63fc37e997894 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWorkflowPriorityMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWorkflowPriorityMapping.java @@ -35,8 +35,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestCapacitySchedulerWorkflowPriorityMapping extends CapacitySchedulerTestBase { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index c1b1c52edef4d..e3c6e7c68b72b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -224,7 +224,7 @@ protected Dispatcher createDispatcher() { verifyContainerDecreased(response, containerId1, 1 * GB); // Wait for scheduler to finish processing kill events.. - dispatcher.waitForEventThreadToWait(); + dispatcher.await(); checkUsedResource(rm1, "default", 1 * GB, null); Assert.assertEquals(1 * GB, @@ -657,7 +657,7 @@ protected Dispatcher createDispatcher() { // Trigger a node heartbeat.. cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); - dispatcher.waitForEventThreadToWait(); + dispatcher.await(); /* Check statuses after reservation satisfied */ // Increase request should be unreserved Assert.assertTrue(app.getReservedContainers().isEmpty()); @@ -774,7 +774,7 @@ protected Dispatcher createDispatcher() { am1.allocate(null, null); // Wait for scheduler to process all events. 
- dispatcher.waitForEventThreadToWait(); + dispatcher.await(); /* Check statuses after reservation satisfied */ // Increase request should be unreserved diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 0cf008fb6e058..89024b63bebdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -28,6 +28,7 @@ .capacity.CapacitySchedulerConfiguration.ROOT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -45,15 +46,18 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -456,6 +460,8 @@ public void testPolicyConfiguration() throws Exception { @Test public void testAppAttemptMetrics() throws Exception { + CSMaxRunningAppsEnforcer enforcer = mock(CSMaxRunningAppsEnforcer.class); + cs.setMaxRunningAppsEnforcer(enforcer); // Manipulate queue 'a' LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B)); @@ -898,6 +904,537 @@ public void testDRFUserLimits() throws Exception { assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001); } + @Test + public void testUserLimitCache() throws Exception { + // Parameters + final int numNodes = 4; + final int nodeSize = 100; + final int numAllocationThreads = 2; + final int numUsers = 40; + final int containerSize = 1 * GB; + final int numContainersPerApp = 10; + final int runTime = 5000; // in ms + + Random random = new Random(); + + // Setup nodes + FiCaSchedulerNode[] nodes = new FiCaSchedulerNode[numNodes]; + Map nodesMap = new HashMap<>(nodes.length); + for (int i = 0; i < numNodes; i++) { + String host = "127.0.0." 
+ i; + FiCaSchedulerNode node = TestUtils.getMockNode(host, DEFAULT_RACK, 0, + nodeSize * GB, nodeSize); + nodes[i] = node; + nodesMap.put(node.getNodeID(), node); + } + + Resource clusterResource = + Resources.createResource(numNodes * (nodeSize * GB), + numNodes * nodeSize); + + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + when(csContext.getClusterResource()).thenReturn(clusterResource); + + // working with just one queue + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[]{A}); + csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, 100); + csConf.setMaximumCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, + 100); + + // reinitialize queues + CSQueueStore newQueues = new CSQueueStore(); + CSQueue newRoot = + CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, + CapacitySchedulerConfiguration.ROOT, + newQueues, queues, + TestUtils.spyHook); + queues = newQueues; + root.reinitialize(newRoot, csContext.getClusterResource()); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); + + // Mock the queue + LeafQueue leafQueue = stubLeafQueue((LeafQueue) queues.get(A)); + + // Set user limit factor so some users are at their limit and the + // user limit cache has more than just a few entries + leafQueue.setUserLimitFactor(10 / nodeSize); + + // Flag to let allocation threads know to stop + AtomicBoolean stopThreads = new AtomicBoolean(false); + AtomicBoolean errorInThreads = new AtomicBoolean(false); + + // Set up allocation threads + Thread[] threads = new Thread[numAllocationThreads]; + for (int i = 0; i < numAllocationThreads; i++) { + threads[i] = new Thread(new Runnable() { + @Override + public void run() { + try { + boolean alwaysNull = true; + while (!stopThreads.get()) { + CSAssignment assignment = leafQueue.assignContainers( + clusterResource, + nodes[random.nextInt(numNodes)], + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + applyCSAssignment(clusterResource, assignment, leafQueue, + nodesMap, leafQueue.applicationAttemptMap); + + if (assignment != CSAssignment.NULL_ASSIGNMENT) { + alwaysNull = false; + } + Thread.sleep(500); + } + + // One more assignment but not committing so that the + // user limits cache is updated to the latest version + CSAssignment assignment = leafQueue.assignContainers( + clusterResource, + nodes[random.nextInt(numNodes)], + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + + if (alwaysNull && assignment == CSAssignment.NULL_ASSIGNMENT) { + LOG.error("Thread only got null assignments"); + errorInThreads.set(true); + } + } catch (Exception e) { + LOG.error("Thread exiting because of exception", e); + errorInThreads.set(true); + } + } + }, "Scheduling Thread " + i); + } + + // Set up users and some apps + final String[] users = new String[numUsers]; + for (int i = 0; i < users.length; i++) { + users[i] = "user_" + i; + } + List applicationAttemptIds = + new ArrayList<>(10); + List apps = new ArrayList<>(10); + Priority priority = TestUtils.createMockPriority(1); + + // Start up 10 apps to begin with + int appId; + for (appId = 0; appId < 10; appId++) { + String user = users[random.nextInt(users.length)]; + ApplicationAttemptId applicationAttemptId = + TestUtils.getMockApplicationAttemptId(appId, 0); + FiCaSchedulerApp app = new FiCaSchedulerApp(applicationAttemptId, + user, + leafQueue, leafQueue.getUsersManager(), spyRMContext); + + leafQueue.submitApplicationAttempt(app, user); + 
app.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(ResourceRequest.ANY, containerSize, + numContainersPerApp, true, priority, recordFactory))); + + applicationAttemptIds.add(applicationAttemptId); + apps.add(app); + } + + // Start threads + for (int i = 0; i < numAllocationThreads; i++) { + threads[i].start(); + } + + final long startTime = Time.monotonicNow(); + while (true) { + // Start a new app about half the iterations and stop a random app the + // rest of the iterations + boolean startOrStopApp = random.nextBoolean(); + if (startOrStopApp || (apps.size() == 1)) { + // start a new app + String user = users[random.nextInt(users.length)]; + ApplicationAttemptId applicationAttemptId = + TestUtils.getMockApplicationAttemptId(appId, 0); + FiCaSchedulerApp app = new FiCaSchedulerApp(applicationAttemptId, + user, + leafQueue, leafQueue.getUsersManager(), spyRMContext); + + leafQueue.submitApplicationAttempt(app, user); + app.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(ResourceRequest.ANY, containerSize, + numContainersPerApp, true, priority, recordFactory))); + + applicationAttemptIds.add(applicationAttemptId); + apps.add(app); + + appId++; + } else { + // stop a random app + int i = random.nextInt(apps.size()); + FiCaSchedulerApp app = apps.get(i); + leafQueue.finishApplication(app.getApplicationId(), app.getUser()); + leafQueue.releaseResource(clusterResource, app, + app.getCurrentConsumption(), "", null); + apps.remove(i); + applicationAttemptIds.remove(i); + } + + if (errorInThreads.get() || (Time.monotonicNow() - startTime) > runTime) { + break; + } + } + + // signal allocation threads to stop + stopThreads.set(true); + + // wait for allocation threads to be done + for (int i = 0; i < numAllocationThreads; i++) { + threads[i].join(); + } + + // check if there was an error in the allocation threads + assertFalse(errorInThreads.get()); + + // check there is only one partition in the user limits cache + assertEquals( 1, leafQueue.userLimitsCache.size()); + + Map> + uLCByPartition = leafQueue.userLimitsCache.get(nodes[0].getPartition()); + + // check there is only one scheduling mode + assertEquals(uLCByPartition.size(), 1); + + ConcurrentMap uLCBySchedulingMode = + uLCByPartition.get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + + // check entries in the user limits cache + for (Map.Entry entry : + uLCBySchedulingMode.entrySet()) { + String user = entry.getKey(); + Resource userLimit = entry.getValue().userLimit; + + Resource expectedUL = leafQueue.getResourceLimitForActiveUsers(user, + clusterResource, nodes[0].getPartition(), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + + assertEquals(expectedUL, userLimit); + } + + // check the current version in the user limits cache + assertEquals(leafQueue.getUsersManager().getLatestVersionOfUsersState(), + leafQueue.currentUserLimitCacheVersion); + assertTrue(leafQueue.currentUserLimitCacheVersion > 0); + } + + @Test + public void testUserLimitCacheActiveUsersChanged() throws Exception { + // Setup some nodes + String host_0 = "127.0.0.1"; + FiCaSchedulerNode node_0 = + TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 6*GB); + String host_1 = "127.0.0.2"; + FiCaSchedulerNode node_1 = + TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 6*GB); + String host_2 = "127.0.0.3"; + FiCaSchedulerNode node_2 = + TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 6*GB); + String host_3 = "127.0.0.4"; + FiCaSchedulerNode node_3 = + TestUtils.getMockNode(host_3, DEFAULT_RACK, 0, 6*GB); 
+ + Map nodes = + ImmutableMap.of( + node_0.getNodeID(), node_0, + node_1.getNodeID(), node_1, + node_2.getNodeID(), node_2, + node_3.getNodeID(), node_3 + ); + + final int numNodes = 4; + Resource clusterResource = + Resources.createResource(numNodes * (6*GB), numNodes); + + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + when(csContext.getClusterResource()).thenReturn(clusterResource); + + // working with just one queue + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {A}); + csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, 100); + csConf.setMaximumCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, + 100); + + // reinitialize queues + CSQueueStore newQueues = new CSQueueStore(); + CSQueue newRoot = + CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, + CapacitySchedulerConfiguration.ROOT, + newQueues, queues, + TestUtils.spyHook); + queues = newQueues; + root.reinitialize(newRoot, csContext.getClusterResource()); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); + + // Mock the queue + LeafQueue leafQueue = stubLeafQueue((LeafQueue)queues.get(A)); + + // initial check + assertEquals(0, leafQueue.userLimitsCache.size()); + assertEquals(0, + leafQueue.getUsersManager().preComputedAllUserLimit.size()); + assertEquals(0, + leafQueue.getUsersManager().preComputedActiveUserLimit.size()); + + // 4 users + final String user_0 = "user_0"; + final String user_1 = "user_1"; + final String user_2 = "user_2"; + final String user_3 = "user_3"; + + // Set user-limit + leafQueue.setUserLimit(0); + leafQueue.setUserLimitFactor(1.0f); + + Priority priority = TestUtils.createMockPriority(1); + + // Fill queue because user limit is calculated as (used / #active users). 
+ final ApplicationAttemptId appAttemptId_9 = + TestUtils.getMockApplicationAttemptId(9, 0); + FiCaSchedulerApp app_9 = + new FiCaSchedulerApp(appAttemptId_9, user_0, leafQueue, + leafQueue.getUsersManager(), spyRMContext); + leafQueue.submitApplicationAttempt(app_9, user_0); + + Map apps = + ImmutableMap.of(app_9.getApplicationAttemptId(), app_9); + + app_9.updateResourceRequests(Arrays.asList( + TestUtils.createResourceRequest(host_0, 1*GB, 5, true, + priority, recordFactory), + TestUtils.createResourceRequest(DEFAULT_RACK, 1*GB, 5, true, + priority, recordFactory), + TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 5, true, + priority, recordFactory))); + assertEquals(1, leafQueue.getUsersManager().getNumActiveUsers()); + + CSAssignment assignment; + for (int i = 0; i < 5; i++) { + assignment = leafQueue.assignContainers(clusterResource, node_0, + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + applyCSAssignment(clusterResource, assignment, leafQueue, nodes, apps); + } + app_9.updateResourceRequests(Arrays.asList( + TestUtils.createResourceRequest(host_1, 1*GB, 5, true, + priority, recordFactory), + TestUtils.createResourceRequest(DEFAULT_RACK, 1*GB, 5, true, + priority, recordFactory), + TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 5, true, + priority, recordFactory))); + for (int i = 0; i < 5; i++) { + assignment = leafQueue.assignContainers(clusterResource, node_1, + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + applyCSAssignment(clusterResource, assignment, leafQueue, nodes, apps); + } + // A total of 10GB have been allocated + assertEquals(10*GB, leafQueue.getUsedResources().getMemorySize()); + assertEquals(10*GB, app_9.getCurrentConsumption().getMemorySize()); + // For one user who should have been cached in the assignContainers call + assertEquals(1, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .size()); + // But the cache is stale because an allocation was made + assertNotEquals(leafQueue.currentUserLimitCacheVersion, + leafQueue.getUsersManager().getLatestVersionOfUsersState()); + // Have not made any calls to fill up the all user limit in UsersManager + assertEquals(0, + leafQueue.getUsersManager().preComputedAllUserLimit.size()); + // But the user limit cache in leafQueue got filled up using the active + // user limit in UsersManager + assertEquals(1, + leafQueue.getUsersManager().preComputedActiveUserLimit.size()); + + // submit 3 applications for now + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + FiCaSchedulerApp app_0 = + new FiCaSchedulerApp(appAttemptId_0, user_0, leafQueue, + leafQueue.getUsersManager(), spyRMContext); + leafQueue.submitApplicationAttempt(app_0, user_0); + + final ApplicationAttemptId appAttemptId_1 = + TestUtils.getMockApplicationAttemptId(1, 0); + FiCaSchedulerApp app_1 = + new FiCaSchedulerApp(appAttemptId_1, user_1, leafQueue, + leafQueue.getUsersManager(), spyRMContext); + leafQueue.submitApplicationAttempt(app_1, user_1); + + final ApplicationAttemptId appAttemptId_2 = + TestUtils.getMockApplicationAttemptId(2, 0); + FiCaSchedulerApp app_2 = + new FiCaSchedulerApp(appAttemptId_2, user_2, leafQueue, + leafQueue.getUsersManager(), spyRMContext); + leafQueue.submitApplicationAttempt(app_2, user_2); + + apps = ImmutableMap.of( + app_0.getApplicationAttemptId(), app_0, + app_1.getApplicationAttemptId(), app_1, + 
app_2.getApplicationAttemptId(), app_2 + ); + + // requests from first three users (all of which will be locality delayed) + app_0.updateResourceRequests(Arrays.asList( + TestUtils.createResourceRequest(host_0, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(DEFAULT_RACK, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 10, true, + priority, recordFactory))); + + app_1.updateResourceRequests(Arrays.asList( + TestUtils.createResourceRequest(host_0, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(DEFAULT_RACK, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 10, true, + priority, recordFactory))); + + app_2.updateResourceRequests(Arrays.asList( + TestUtils.createResourceRequest(host_0, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(DEFAULT_RACK, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 10, true, + priority, recordFactory))); + + // There are 3 active users right now + assertEquals(3, leafQueue.getUsersManager().getNumActiveUsers()); + + // fill up user limit cache + assignment = leafQueue.assignContainers(clusterResource, node_1, + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + applyCSAssignment(clusterResource, assignment, leafQueue, nodes, apps); + // A total of 10GB have been allocated + assertEquals(10*GB, leafQueue.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(10*GB, app_9.getCurrentConsumption().getMemorySize()); + // There are three users who should have been cached + assertEquals(3, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .size()); + // There are three users so each has a limit of 12/3 = 4GB + assertEquals(4*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_0).userLimit.getMemorySize()); + assertEquals(4*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_1).userLimit.getMemorySize()); + assertEquals(4*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_2).userLimit.getMemorySize()); + // And the cache is NOT stale because no allocation was made + assertEquals(leafQueue.currentUserLimitCacheVersion, + leafQueue.getUsersManager().getLatestVersionOfUsersState()); + // Have not made any calls to fill up the all user limit in UsersManager + assertEquals(0, + leafQueue.getUsersManager().preComputedAllUserLimit.size()); + // But the user limit cache in leafQueue got filled up using the active + // user limit in UsersManager with 4GB limit (since there are three users + // so 12/3 = 4GB each) + assertEquals(1, leafQueue.getUsersManager() + .preComputedActiveUserLimit.size()); + assertEquals(1, leafQueue.getUsersManager() + .preComputedActiveUserLimit.get(RMNodeLabelsManager.NO_LABEL).size()); + assertEquals(4*GB, leafQueue.getUsersManager() + .preComputedActiveUserLimit.get(RMNodeLabelsManager.NO_LABEL) + 
.get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY).getMemorySize()); + + // submit the 4th application + final ApplicationAttemptId appAttemptId_3 = + TestUtils.getMockApplicationAttemptId(3, 0); + FiCaSchedulerApp app_3 = + new FiCaSchedulerApp(appAttemptId_3, user_3, leafQueue, + leafQueue.getUsersManager(), spyRMContext); + leafQueue.submitApplicationAttempt(app_3, user_3); + + apps = ImmutableMap.of( + app_0.getApplicationAttemptId(), app_0, + app_1.getApplicationAttemptId(), app_1, + app_2.getApplicationAttemptId(), app_2, + app_3.getApplicationAttemptId(), app_3 + ); + + app_3.updateResourceRequests(Arrays.asList( + TestUtils.createResourceRequest(host_0, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(DEFAULT_RACK, 4*GB, 1, true, + priority, recordFactory), + TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 10, true, + priority, recordFactory))); + + // 4 active users now + assertEquals(4, leafQueue.getUsersManager().getNumActiveUsers()); + // Check that the user limits cache has become stale + assertNotEquals(leafQueue.currentUserLimitCacheVersion, + leafQueue.getUsersManager().getLatestVersionOfUsersState()); + + // Even though there are no allocations, user limit cache is repopulated + assignment = leafQueue.assignContainers(clusterResource, node_1, + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + applyCSAssignment(clusterResource, assignment, leafQueue, nodes, apps); + // A total of 10GB have been allocated + assertEquals(10*GB, leafQueue.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); + assertEquals(10*GB, app_9.getCurrentConsumption().getMemorySize()); + // There are four users who should have been cached + assertEquals(4, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .size()); + // There are four users so each has a limit of 12/4 = 3GB + assertEquals(3*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_0).userLimit.getMemorySize()); + assertEquals(3*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_1).userLimit.getMemorySize()); + assertEquals(3*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_2).userLimit.getMemorySize()); + assertEquals(3*GB, leafQueue.userLimitsCache + .get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) + .get(user_3).userLimit.getMemorySize()); + // And the cache is NOT stale because no allocation was made + assertEquals(leafQueue.currentUserLimitCacheVersion, + leafQueue.getUsersManager().getLatestVersionOfUsersState()); + // Have not made any calls to fill up the all user limit in UsersManager + assertEquals(0, + leafQueue.getUsersManager().preComputedAllUserLimit.size()); + // But the user limit cache in leafQueue got filled up using the active + // user limit in UsersManager with 3GB limit (since there are four users + // so 12/4 = 3GB each) + assertEquals(1, leafQueue.getUsersManager() + .preComputedActiveUserLimit.size()); + assertEquals(1, 
leafQueue.getUsersManager() + .preComputedActiveUserLimit.get(RMNodeLabelsManager.NO_LABEL).size()); + assertEquals(3*GB, leafQueue.getUsersManager() + .preComputedActiveUserLimit.get(RMNodeLabelsManager.NO_LABEL) + .get(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY).getMemorySize()); + } + @Test public void testUserLimits() throws Exception { // Mock the queue @@ -1237,9 +1774,9 @@ public void testComputeUserLimitAndSetHeadroom() throws IOException { qb.finishApplication(app_0.getApplicationId(), user_0); qb.finishApplication(app_2.getApplicationId(), user_1); qb.releaseResource(clusterResource, app_0, Resource.newInstance(4*GB, 1), - null, null); + "", null); qb.releaseResource(clusterResource, app_2, Resource.newInstance(4*GB, 1), - null, null); + "", null); qb.setUserLimit(50); qb.setUserLimitFactor(1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index 9e45759acc579..e21a60f3d7e9a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -25,9 +25,11 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -49,11 +51,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestPartitionQueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestQueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; @@ -65,9 +70,9 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import 
org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestNodeLabelContainerAllocation { private final int GB = 1024; @@ -637,6 +642,295 @@ public RMNodeLabelsManager createNodeLabelManager() { rm1.close(); } + @Test (timeout = 120000) + public void testContainerReservationContinueLookingWithLabels() + throws Exception { + // set node -> label + mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), + toSet("x"), NodeId.newInstance("h2", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM( + TestUtils.getConfigurationWithQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); // label = x + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1"); + + ContainerId containerId; + + // launch an app to queue a1 (label = x) + MockRMAppSubmissionData data1 = + MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1) + .withAppName("app1") + .withUser("user") + .withAcls(null) + .withQueue("a1") + .withUnmanagedAM(false) + .withAmLabel("x") + .build(); + RMApp app1 = MockRMAppSubmitter.submit(rm1, data1); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1 + .getApplicationAttemptId()); + + // Verify live on node1 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(2 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(2 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request map containers for app1. 
+ am1.allocate("*", 5 * GB, 2, 5, new ArrayList(), "x"); + + // Do node heartbeat to allocate first mapper on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // Verify live on node1 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(7 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(7 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate second mapper on node2 + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(12 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(12 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request reducer containers for app1. 
+ am1.allocate("*", 3 * GB, 2, 10, new ArrayList(), "x"); + + // Do node heartbeat to reserve reducer on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // node1 7 GB used and 3 GB reserved, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate container for second reducer on node2 + // This should unreserve the reserved container + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used and 0 GB reserved, node2 8 GB used + Assert.assertEquals(4, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(0, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerReservationContinueLookingWithDefaultLabels() + throws Exception { + // This is the same as testContainerReservationContinueLookingWithLabels, + // but this test doesn't specify the label expression in the + // ResourceRequest, instead it uses default queue label expressions + mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), + toSet("x"), NodeId.newInstance("h2", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM( + TestUtils.getConfigurationWithDefaultQueueLabels(conf)) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB); // label = x + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1"); + + ContainerId containerId; + + // launch an app to queue a1 (label = x) + MockRMAppSubmissionData data1 = + MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1) + .withAppName("app1") + .withUser("user") + .withAcls(null) + .withQueue("a1") + .withUnmanagedAM(false) + .build(); + RMApp app1 = MockRMAppSubmitter.submit(rm1, data1); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1 + .getApplicationAttemptId()); + + // Verify live on node1 + containerId = 
ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(1, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(2 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(2 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request map containers for app1. + am1.allocate("*", 5 * GB, 2, 5, new ArrayList(), null); + + // Do node heartbeat to allocate first mapper on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // Verify live on node1 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(7 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(7 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate second mapper on node2 + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertFalse(schedulerApp1.getReservedContainers().size() > 0); + Assert.assertEquals(12 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(12 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // request reducer containers for app1. 
+ am1.allocate("*", 3 * GB, 2, 10, new ArrayList(), null); + + // Do node heartbeat to reserve reducer on node1 + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + // node1 7 GB used and 3 GB reserved, node2 5 GB used + Assert.assertEquals(3, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(1, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(3 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + // Do node heartbeat to allocate container for second reducer on node2 + // This should unreserve the reserved container + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + + // Verify live on node2 + containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // node1 7 GB used and 0 GB reserved, node2 8 GB used + Assert.assertEquals(4, schedulerApp1.getLiveContainers().size()); + Assert.assertEquals(0, schedulerApp1.getReservedContainers().size()); + Assert.assertEquals(15 * GB, cs.getRootQueue().getQueueResourceUsage() + .getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() + .getReserved("x").getMemorySize()); + Assert.assertEquals(15 * GB, + leafQueue.getQueueResourceUsage().getUsed("x").getMemorySize()); + Assert.assertEquals(0 * GB, + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); + + rm1.close(); + } + @Test (timeout = 120000) public void testRMContainerLeakInLeafQueue() throws Exception { // set node -> label @@ -2338,10 +2632,9 @@ public RMNodeLabelsManager createNodeLabelManager() { assertEquals(0 * GB, leafQueueB.getMetrics().getAvailableMB()); assertEquals(0 * GB, leafQueueB.getMetrics().getAllocatedMB()); - // The total memory tracked by QueueMetrics is 0GB for the default partition CSQueue rootQueue = cs.getRootQueue(); - assertEquals(0*GB, rootQueue.getMetrics().getAvailableMB() + - rootQueue.getMetrics().getAllocatedMB()); + assertEquals(0 * GB, rootQueue.getMetrics().getAvailableMB() + + rootQueue.getMetrics().getAllocatedMB()); // Kill all apps in queue a cs.killAllAppsInQueue("a"); @@ -2390,6 +2683,8 @@ public void testQueueMetricsWithLabelsOnDefaultLabelNode() throws Exception { csConf.setCapacityByLabel(queueB, "x", 50); csConf.setMaximumCapacityByLabel(queueB, "x", 50); + csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true); + // set node -> label mgr.addToCluserNodeLabels( ImmutableSet.of(NodeLabel.newInstance("x", false))); @@ -2408,6 +2703,54 @@ public RMNodeLabelsManager createNodeLabelManager() { rm1.start(); MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId()); + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId()); + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + for (int i = 0; i < 
50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } + double delta = 0.0001; + CSQueue leafQueue = cs.getQueue("a"); + CSQueue leafQueueB = cs.getQueue("b"); + CSQueue rootQueue = cs.getRootQueue(); + assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB(), delta); + assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta); + assertEquals(7.5 * GB, leafQueueB.getMetrics().getAvailableMB(), delta); + + MetricsSystem ms = leafQueueB.getMetrics().getMetricsSystem(); + QueueMetrics partXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "x"); + QueueMetrics partDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, ""); + QueueMetrics queueAMetrics = + (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a"); + QueueMetrics queueBMetrics = + (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.b"); + QueueMetrics queueAPartDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a"); + QueueMetrics queueAPartXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a"); + QueueMetrics queueBPartDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.b"); + QueueMetrics queueBPartXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.b"); + QueueMetrics rootMetrics = + (QueueMetrics) TestQueueMetrics.queueSource(ms, "root"); + assertEquals(10 * GB, partXMetrics.getAvailableMB(), delta); + assertEquals(10 * GB, partDefaultMetrics.getAvailableMB(), delta); + assertEquals(2.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(7.5 * GB, queueBPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(5 * GB, queueAPartXMetrics.getAvailableMB(), delta); + assertEquals(5 * GB, queueBPartXMetrics.getAvailableMB(), delta); + assertEquals(10 * GB, rootMetrics.getAvailableMB(), delta); + assertEquals(2.5 * GB, queueAMetrics.getAvailableMB(), delta); + assertEquals(7.5 * GB, queueBMetrics.getAvailableMB(), delta); + // app1 -> a MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1) @@ -2423,47 +2766,73 @@ public RMNodeLabelsManager createNodeLabelManager() { // app1 asks for 3 partition= containers am1.allocate("*", 1 * GB, 3, new ArrayList()); - // NM1 do 50 heartbeats - CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); - RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); - - SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId()); for (int i = 0; i < 50; i++) { cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); } + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } // app1 gets all resource in partition=x (non-exclusive) Assert.assertEquals(3, schedulerNode1.getNumContainers()); - SchedulerNodeReport reportNm1 = rm1.getResourceScheduler() .getNodeReport(nm1.getNodeId()); Assert.assertEquals(3 * GB, reportNm1.getUsedResource().getMemorySize()); Assert.assertEquals(7 * GB, reportNm1.getAvailableResource().getMemorySize()); - SchedulerNodeReport reportNm2 = rm1.getResourceScheduler() .getNodeReport(nm2.getNodeId()); Assert.assertEquals(1 * GB, reportNm2.getUsedResource().getMemorySize()); Assert.assertEquals(9 * GB, reportNm2.getAvailableResource().getMemorySize()); - - LeafQueue leafQueue = (LeafQueue) cs.getQueue("a"); - // 3GB is used from label x quota. 1.5 GB is remaining from default label. - // 2GB is remaining from label x. 
- assertEquals(15 * GB / 10, leafQueue.getMetrics().getAvailableMB()); + assertEquals(7 * GB, partXMetrics.getAvailableMB(), delta); + assertEquals(9 * GB, partDefaultMetrics.getAvailableMB(), delta); + assertEquals(1.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(1 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta); + assertEquals(7.5 * GB, queueBPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta); + assertEquals(5 * GB, queueBPartXMetrics.getAvailableMB(), delta); + assertEquals(1 * GB, queueAMetrics.getAllocatedMB(), delta); + assertEquals(1.5 * GB, queueAMetrics.getAvailableMB(), delta); + assertEquals(0 * GB, queueBMetrics.getAllocatedMB(), delta); + assertEquals(7.5 * GB, queueBMetrics.getAvailableMB(), delta); + assertEquals(0 * GB, queueAMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueAPartDefaultMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueBPartDefaultMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueBPartXMetrics.getPendingMB(), delta); + assertEquals(1.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta); assertEquals(1 * GB, leafQueue.getMetrics().getAllocatedMB()); + assertEquals(3 * GB, partXMetrics.getAllocatedMB(), delta); + assertEquals(1 * GB, partDefaultMetrics.getAllocatedMB(), delta); + + QueueMetrics partDefaultQueueAUserMetrics = + (QueueMetrics) TestPartitionQueueMetrics.userSource(ms, "", "user", + "root.a"); + QueueMetrics partXQueueAUserMetrics = + (QueueMetrics) TestPartitionQueueMetrics.userSource(ms, "x", "user", + "root.a"); + QueueMetrics queueAUserMetrics = + (QueueMetrics) TestQueueMetrics.userSource(ms, "root.a", "user"); + + assertEquals(2 * GB, queueAUserMetrics.getAvailableMB(), delta); + assertEquals(1 * GB, queueAUserMetrics.getAllocatedMB(), delta); + assertEquals(1.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(1 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta); + assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta); + assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta); + assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta); + assertEquals(2 * GB, partXQueueAUserMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta); - // app1 asks for 1 default partition container am1.allocate("*", 1 * GB, 5, new ArrayList()); - // NM2 do couple of heartbeats - RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); - - SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId()); cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); - // app1 gets all resource in default partition Assert.assertEquals(2, schedulerNode2.getNumContainers()); + Assert.assertEquals(3, schedulerNode1.getNumContainers()); // 3GB is used from label x quota. 2GB used from default label. // So 0.5 GB is remaining from default label. 
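The lookups at the top of this test resolve the same QueueMetrics type at three scopes, per partition, per (partition, queue) and per (partition, queue, user), all from one MetricsSystem. A small holder in the spirit of the sketch below makes that pattern explicit; it reuses the TestPartitionQueueMetrics helpers this test already calls, while the class itself and its field names are illustrative only.

    static final class PartitionScopedMetrics {
      final QueueMetrics partitionMetrics;  // e.g. all of partition "x"
      final QueueMetrics queueMetrics;      // e.g. root.a within partition "x"
      final QueueMetrics userMetrics;       // e.g. user "user" in root.a on "x"

      PartitionScopedMetrics(MetricsSystem ms, String partition,
          String queuePath, String userName) {
        this.partitionMetrics = (QueueMetrics)
            TestPartitionQueueMetrics.partitionSource(ms, partition);
        this.queueMetrics = (QueueMetrics)
            TestPartitionQueueMetrics.queueSource(ms, partition, queuePath);
        this.userMetrics = (QueueMetrics)
            TestPartitionQueueMetrics.userSource(ms, partition, userName, queuePath);
      }
    }

In the snapshots asserted in this test the numbers line up the way one would hope: each partition-level availableMB equals the sum over the leaf queues for that partition, and each queue-level figure is its guaranteed share minus its allocated MB (2.5 - 2 = 0.5 GB on the default partition, 5 - 3 = 2 GB on "x" for root.a), which is what the comments just above spell out. Availability in QueueMetrics is more involved in general, so this is only a reading of these particular numbers, not a stated invariant of the patch.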
@@ -2472,10 +2841,100 @@ public RMNodeLabelsManager createNodeLabelManager() { // The total memory tracked by QueueMetrics is 10GB // for the default partition - CSQueue rootQueue = cs.getRootQueue(); assertEquals(10*GB, rootQueue.getMetrics().getAvailableMB() + rootQueue.getMetrics().getAllocatedMB()); + assertEquals(0.5 * GB, queueAMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAMetrics.getAllocatedMB()); + assertEquals(0.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta); + assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta); + assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), + delta); + assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta); + assertEquals(2 * GB, partXQueueAUserMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta); + assertEquals(1 * GB, queueAUserMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAUserMetrics.getAllocatedMB(), delta); + assertEquals(7 * GB, partXMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, partXMetrics.getAllocatedMB(), delta); + assertEquals(8 * GB, partDefaultMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, partDefaultMetrics.getAllocatedMB(), delta); + + // Pending Resources when containers are waiting on "default" partition + assertEquals(4 * GB, queueAMetrics.getPendingMB(), delta); + assertEquals(4 * GB, queueAPartDefaultMetrics.getPendingMB(), delta); + assertEquals(4 * GB, partDefaultQueueAUserMetrics.getPendingMB(), + delta); + assertEquals(4 * GB, queueAUserMetrics.getPendingMB(), delta); + assertEquals(4 * GB, partDefaultMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta); + assertEquals(0 * GB, partXQueueAUserMetrics.getPendingMB(), delta); + assertEquals(0 * GB, partXMetrics.getPendingMB(), delta); + + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } + + assertEquals(0.5 * GB, queueAMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAMetrics.getAllocatedMB()); + assertEquals(0.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta); + assertEquals(0 * GB, queueAPartXMetrics.getAvailableMB(), delta); + assertEquals(7 * GB, queueAPartXMetrics.getAllocatedMB(), delta); + assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), + delta); + assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta); + assertEquals(0 * GB, partXQueueAUserMetrics.getAvailableMB(), delta); + assertEquals(7 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta); + assertEquals(1 * GB, queueAUserMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, queueAUserMetrics.getAllocatedMB(), delta); + assertEquals(3 * GB, partXMetrics.getAvailableMB(), delta); + assertEquals(7 * GB, partXMetrics.getAllocatedMB(), delta); + assertEquals(8 * GB, partDefaultMetrics.getAvailableMB(), delta); + assertEquals(2 * GB, partDefaultMetrics.getAllocatedMB(), delta); + + // Pending Resources after containers has been assigned on "x" partition + assertEquals(0 * GB, queueAMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueAPartDefaultMetrics.getPendingMB(), delta); + assertEquals(0 * GB, 
partDefaultQueueAUserMetrics.getPendingMB(), + delta); + assertEquals(0 * GB, queueAUserMetrics.getPendingMB(), delta); + assertEquals(0 * GB, partDefaultMetrics.getPendingMB(), delta); + assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta); + assertEquals(0 * GB, partXQueueAUserMetrics.getPendingMB(), delta); + assertEquals(0 * GB, partXMetrics.getPendingMB(), delta); + + rm1.killApp(app1.getApplicationId()); + rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED); + + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } + + assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB(), delta); + assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta); + assertEquals(7.5 * GB, leafQueueB.getMetrics().getAvailableMB(), delta); + assertEquals(2, queueAMetrics.getAggregateAllocatedContainers()); + assertEquals(2, queueAMetrics.getAggegatedReleasedContainers()); + assertEquals(2, queueAPartDefaultMetrics.getAggregateAllocatedContainers()); + assertEquals(2, queueAPartDefaultMetrics.getAggegatedReleasedContainers()); + assertEquals(7, partXMetrics.getAggregateAllocatedContainers()); + assertEquals(2, partDefaultMetrics.getAggregateAllocatedContainers()); + assertEquals(7, queueAPartXMetrics.getAggregateAllocatedContainers()); + assertEquals(7, queueAPartXMetrics.getAggegatedReleasedContainers()); + assertEquals(2.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta); + assertEquals(5 * GB, queueAPartXMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, queueAUserMetrics.getAvailableMB(), delta); + assertEquals(3 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta); + assertEquals(5 * GB, partXQueueAUserMetrics.getAvailableMB(), delta); rm1.close(); } @@ -2606,8 +3065,8 @@ public RMNodeLabelsManager createNodeLabelManager() { // The total memory tracked by QueueMetrics is 12GB // for the default partition CSQueue rootQueue = cs.getRootQueue(); - assertEquals(12*GB, rootQueue.getMetrics().getAvailableMB() + - rootQueue.getMetrics().getAllocatedMB()); + assertEquals(12 * GB, rootQueue.getMetrics().getAvailableMB() + + rootQueue.getMetrics().getAllocatedMB()); // Kill all apps in queue a cs.killAllAppsInQueue("a"); @@ -2619,6 +3078,207 @@ public RMNodeLabelsManager createNodeLabelManager() { rm1.close(); } + @Test + public void testTwoLevelQueueMetricsWithLabels() throws Exception { + + CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration( + this.conf); + + // Define top-level queues + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"a"}); + csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); + + final String queueA = CapacitySchedulerConfiguration.ROOT + ".a"; + csConf.setCapacity(queueA, 100); + csConf.setAccessibleNodeLabels(queueA, toSet("x")); + csConf.setCapacityByLabel(queueA, "x", 100); + csConf.setMaximumCapacityByLabel(queueA, "x", 100); + + csConf.setQueues(queueA, new String[] {"a1"}); + final String queueA1 = queueA + ".a1"; + csConf.setCapacity(queueA1, 100); + + csConf.setAccessibleNodeLabels(queueA1, toSet("x")); + csConf.setCapacityByLabel(queueA1, "x", 100); + csConf.setMaximumCapacityByLabel(queueA1, "x", 100); + + // set node -> label + // label x exclusivity is set to true + mgr.addToCluserNodeLabels( + ImmutableSet.of(NodeLabel.newInstance("x", true))); + mgr.addLabelsToNode( + ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // 
inject node label manager + MockRM rm1 = new MockRM(csConf) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 12 * GB); // label = + + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + ParentQueue leafQueueA = (ParentQueue) cs.getQueue("a"); + LeafQueue leafQueueA1 = (LeafQueue) cs.getQueue("a1"); + assertEquals(12 * GB, leafQueueA1.getMetrics().getAvailableMB()); + assertEquals(0 * GB, leafQueueA1.getMetrics().getAllocatedMB()); + MetricsSystem ms = leafQueueA1.getMetrics().getMetricsSystem(); + QueueMetrics partXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "x"); + QueueMetrics partDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, ""); + QueueMetrics queueAPartDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a"); + QueueMetrics queueAPartXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a"); + QueueMetrics queueA1PartDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a.a1"); + QueueMetrics queueA1PartXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a.a1"); + QueueMetrics queueRootPartDefaultMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root"); + QueueMetrics queueRootPartXMetrics = + (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root"); + QueueMetrics queueAMetrics = + (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a"); + QueueMetrics queueA1Metrics = + (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a.a1"); + QueueMetrics queueRootMetrics = + (QueueMetrics) TestQueueMetrics.queueSource(ms, "root"); + assertEquals(12 * GB, queueAMetrics.getAvailableMB()); + assertEquals(12 * GB, queueA1Metrics.getAvailableMB()); + assertEquals(12 * GB, queueRootMetrics.getAvailableMB()); + assertEquals(12 * GB, leafQueueA.getMetrics().getAvailableMB()); + assertEquals(10 * GB, queueA1PartXMetrics.getAvailableMB()); + assertEquals(10 * GB, queueAPartXMetrics.getAvailableMB()); + assertEquals(10 * GB, queueRootPartXMetrics.getAvailableMB()); + assertEquals(12 * GB, queueA1PartDefaultMetrics.getAvailableMB()); + assertEquals(12 * GB, queueAPartDefaultMetrics.getAvailableMB()); + assertEquals(12 * GB, queueRootPartDefaultMetrics.getAvailableMB()); + assertEquals(10 * GB, partXMetrics.getAvailableMB()); + assertEquals(12 * GB, partDefaultMetrics.getAvailableMB()); + + // app1 -> a + RMApp app1 = MockRMAppSubmitter.submit(rm1, + MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1) + .withAppName("app") + .withUser("user") + .withAcls(null) + .withQueue("a1") + .withAmLabel("x") + .build()); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // app1 asks for 5 partition=x containers + am1.allocate("*", 1 * GB, 5, new ArrayList(), "x"); + // NM1 do 50 heartbeats + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + + SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId()); + + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + + // app1 gets all resource in partition=x + Assert.assertEquals(6, schedulerNode1.getNumContainers()); + + SchedulerNodeReport reportNm1 = rm1.getResourceScheduler() + .getNodeReport(nm1.getNodeId()); + 
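The node-report checks that follow, and their counterparts later in this test, always verify the same pair of numbers, and used plus available memory should add up to whatever the MockNM registered with. A bundled check could look like the sketch below; it assumes the surrounding test class's GB constant and imports, and the helper name is not part of the patch.

    private static void assertNodeUsage(ResourceScheduler scheduler,
        NodeId nodeId, long usedGB, long availableGB) {
      SchedulerNodeReport report = scheduler.getNodeReport(nodeId);
      // Used and available memory for the node as seen by the scheduler.
      Assert.assertEquals(usedGB * GB, report.getUsedResource().getMemorySize());
      Assert.assertEquals(availableGB * GB,
          report.getAvailableResource().getMemorySize());
    }

The check below would then read assertNodeUsage(rm1.getResourceScheduler(), nm1.getNodeId(), 6, 4): the AM plus five 1 GB containers on the exclusive "x" node leave 4 of its 10 GB free.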
Assert.assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize()); + Assert.assertEquals(4 * GB, reportNm1.getAvailableResource().getMemorySize()); + + SchedulerNodeReport reportNm2 = rm1.getResourceScheduler() + .getNodeReport(nm2.getNodeId()); + Assert.assertEquals(0 * GB, reportNm2.getUsedResource().getMemorySize()); + Assert.assertEquals(12 * GB, + reportNm2.getAvailableResource().getMemorySize()); + + assertEquals(0 * GB, queueAMetrics.getAllocatedMB()); + assertEquals(0 * GB, queueA1Metrics.getAllocatedMB()); + assertEquals(0 * GB, queueRootMetrics.getAllocatedMB()); + assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB()); + assertEquals(0 * GB, leafQueueA.getMetrics().getAllocatedMB()); + assertEquals(6 * GB, queueA1PartXMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueAPartXMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueRootPartXMetrics.getAllocatedMB()); + assertEquals(0 * GB, queueA1PartDefaultMetrics.getAllocatedMB()); + assertEquals(0 * GB, queueAPartDefaultMetrics.getAllocatedMB()); + assertEquals(0 * GB, queueRootPartDefaultMetrics.getAllocatedMB()); + assertEquals(6 * GB, partXMetrics.getAllocatedMB()); + assertEquals(0 * GB, partDefaultMetrics.getAllocatedMB()); + + // app2 -> a + RMApp app2 = MockRMAppSubmitter.submit(rm1, + MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1) + .withAppName("app") + .withUser("user") + .withAcls(null) + .withQueue("a1") + .withAmLabel("") + .build()); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // app2 asks for 5 partition= containers + am2.allocate("*", 1 * GB, 5, new ArrayList(), ""); + // NM2 do 50 heartbeats + RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId()); + + SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId()); + + for (int i = 0; i < 50; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode2)); + } + + // app1 gets all resource in partition=x + Assert.assertEquals(6, schedulerNode2.getNumContainers()); + + reportNm1 = rm1.getResourceScheduler().getNodeReport(nm1.getNodeId()); + Assert.assertEquals(6 * GB, reportNm1.getUsedResource().getMemorySize()); + Assert.assertEquals(4 * GB, + reportNm1.getAvailableResource().getMemorySize()); + + reportNm2 = rm1.getResourceScheduler().getNodeReport(nm2.getNodeId()); + Assert.assertEquals(6 * GB, reportNm2.getUsedResource().getMemorySize()); + Assert.assertEquals(6 * GB, + reportNm2.getAvailableResource().getMemorySize()); + + assertEquals(6 * GB, leafQueueA.getMetrics().getAvailableMB()); + assertEquals(6 * GB, leafQueueA.getMetrics().getAllocatedMB()); + + // The total memory tracked by QueueMetrics is 12GB + // for the default partition + CSQueue rootQueue = cs.getRootQueue(); + assertEquals(12 * GB, rootQueue.getMetrics().getAvailableMB() + + rootQueue.getMetrics().getAllocatedMB()); + + assertEquals(6 * GB, queueAMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueA1Metrics.getAllocatedMB()); + assertEquals(6 * GB, queueRootMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueA1PartXMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueAPartXMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueRootPartXMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueA1PartDefaultMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueAPartDefaultMetrics.getAllocatedMB()); + assertEquals(6 * GB, queueRootPartDefaultMetrics.getAllocatedMB()); + assertEquals(6 * GB, partXMetrics.getAllocatedMB()); + assertEquals(6 * GB, partDefaultMetrics.getAllocatedMB()); + + // Kill all apps in queue a + 
cs.killAllAppsInQueue("a1"); + rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED); + rm1.waitForAppRemovedFromScheduler(app1.getApplicationId()); + assertEquals(0 * GB, leafQueueA.getMetrics().getUsedAMResourceMB()); + assertEquals(0, leafQueueA.getMetrics().getUsedAMResourceVCores()); + rm1.close(); + } + @Test public void testQueueMetricsWithLabelsDisableElasticity() throws Exception { /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 0560d595a6323..9ed0388aec1a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -108,15 +108,17 @@ public void setUp() throws Exception { private static final String A = "a"; private static final String B = "b"; + private static final String Q_A = + CapacitySchedulerConfiguration.ROOT + "." + A; + private static final String Q_B = + CapacitySchedulerConfiguration.ROOT + "." + B; private void setupSingleLevelQueues(CapacitySchedulerConfiguration conf) { // Define top-level queues conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {A, B}); - final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A; conf.setCapacity(Q_A, 30); - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B; conf.setCapacity(Q_B, 70); LOG.info("Setup top-level queues a and b"); @@ -128,11 +130,9 @@ private void setupSingleLevelQueuesWithAbsoluteResource( // Define top-level queues conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[]{A, B}); - final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A; conf.setMinimumResourceRequirement("", Q_A, QUEUE_A_RESOURCE); - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B; conf.setMinimumResourceRequirement("", Q_B, QUEUE_B_RESOURCE); @@ -368,9 +368,7 @@ public void testSingleLevelQueues() throws Exception { public void testSingleLevelQueuesPrecision() throws Exception { // Setup queue configs setupSingleLevelQueues(csConf); - final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + "a"; csConf.setCapacity(Q_A, 30); - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + "b"; csConf.setCapacity(Q_B, 70.5F); CSQueueStore queues = new CSQueueStore(); @@ -434,10 +432,8 @@ private void setupMultiLevelQueues(CapacitySchedulerConfiguration conf) { // Define top-level queues csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {A, B, C, D}); - final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A; conf.setCapacity(Q_A, 10); - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B; conf.setCapacity(Q_B, 50); final String Q_C = CapacitySchedulerConfiguration.ROOT + "." + C; @@ -656,7 +652,6 @@ public void testQueueCapacitySettingChildZero() throws Exception { setupMultiLevelQueues(csConf); // set child queues capacity to 0 when parents not 0 - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B; csConf.setCapacity(Q_B + "." 
+ B1, 0); csConf.setCapacity(Q_B + "." + B2, 0); csConf.setCapacity(Q_B + "." + B3, 0); @@ -673,9 +668,7 @@ public void testQueueCapacitySettingParentZero() throws Exception { setupMultiLevelQueues(csConf); // set parent capacity to 0 when child not 0 - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B; csConf.setCapacity(Q_B, 0); - final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A; csConf.setCapacity(Q_A, 60); CSQueueStore queues = new CSQueueStore(); @@ -690,13 +683,11 @@ public void testQueueCapacityZero() throws Exception { setupMultiLevelQueues(csConf); // set parent and child capacity to 0 - final String Q_B = CapacitySchedulerConfiguration.ROOT + "." + B; csConf.setCapacity(Q_B, 0); csConf.setCapacity(Q_B + "." + B1, 0); csConf.setCapacity(Q_B + "." + B2, 0); csConf.setCapacity(Q_B + "." + B3, 0); - final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A; csConf.setCapacity(Q_A, 60); CSQueueStore queues = new CSQueueStore(); @@ -1029,10 +1020,125 @@ public void testAbsoluteResourceWithChangeInClusterResource() QUEUE_B_RESOURCE_70PERC); } + @Test + public void testDeriveCapacityFromAbsoluteConfigurations() throws Exception { + // Setup queue configs + setupSingleLevelQueuesWithAbsoluteResource(csConf); + + CSQueueStore queues = new CSQueueStore(); + CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, + null, CapacitySchedulerConfiguration.ROOT, queues, queues, + TestUtils.spyHook); + + // Setup some nodes + int numNodes = 2; + final long memoryPerNode = (QUEUE_A_RESOURCE.getMemorySize() + + QUEUE_B_RESOURCE.getMemorySize()) / numNodes; + int coresPerNode = (QUEUE_A_RESOURCE.getVirtualCores() + + QUEUE_B_RESOURCE.getVirtualCores()) / numNodes; + + Resource clusterResource = Resources.createResource( + numNodes * memoryPerNode, numNodes * coresPerNode); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); + + // Start testing + // Only MaximumSystemApplications is set in csConf + LeafQueue a = (LeafQueue) queues.get(A); + LeafQueue b = (LeafQueue) queues.get(B); + + float queueAScale = (float) QUEUE_A_RESOURCE.getMemorySize() / + (float) clusterResource.getMemorySize(); + float queueBScale = (float) QUEUE_B_RESOURCE.getMemorySize() / + (float) clusterResource.getMemorySize(); + + assertEquals(queueAScale, a.getQueueCapacities().getCapacity(), + DELTA); + assertEquals(1f, a.getQueueCapacities().getMaximumCapacity(), + DELTA); + assertEquals(queueAScale, a.getQueueCapacities().getAbsoluteCapacity(), + DELTA); + assertEquals(1f, + a.getQueueCapacities().getAbsoluteMaximumCapacity(), DELTA); + assertEquals((int) (csConf.getMaximumSystemApplications() * queueAScale), + a.getMaxApplications()); + assertEquals(a.getMaxApplications(), a.getMaxApplicationsPerUser()); + + assertEquals(queueBScale, + b.getQueueCapacities().getCapacity(), DELTA); + assertEquals(1f, + b.getQueueCapacities().getMaximumCapacity(), DELTA); + assertEquals(queueBScale, + b.getQueueCapacities().getAbsoluteCapacity(), DELTA); + assertEquals(1f, + b.getQueueCapacities().getAbsoluteMaximumCapacity(), DELTA); + assertEquals((int) (csConf.getMaximumSystemApplications() * queueBScale), + b.getMaxApplications()); + assertEquals(b.getMaxApplications(), b.getMaxApplicationsPerUser()); + + // Set GlobalMaximumApplicationsPerQueue in csConf + csConf.setGlobalMaximumApplicationsPerQueue(20000); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); 
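The assertions around this point pin down how limits are derived when queues carry absolute resource configurations: a queue's capacity scale is its configured memory divided by the cluster memory, its application limit is that scale applied to maximum-system-applications, and a configured global per-queue maximum replaces the system-wide figure. A rough worked example with hypothetical numbers follows (QUEUE_A_RESOURCE and the cluster size used by the real test are defined elsewhere in this class).

    // Hypothetical figures, chosen only to make the arithmetic concrete.
    long clusterMB = 32 * 1024;                     // e.g. 2 nodes x 16 GB
    long queueAMB = 16 * 1024;                      // absolute minimum of root.a
    float queueAScale = (float) queueAMB / clusterMB;             // 0.5

    // 10000 is the stock default of yarn.scheduler.capacity.maximum-applications.
    int maxSystemApps = 10000;
    int derivedMaxApps = (int) (maxSystemApps * queueAScale);     // 5000

    // A global per-queue cap, once configured, is scaled instead.
    int globalPerQueueCap = 20000;
    int cappedMaxApps = (int) (globalPerQueueCap * queueAScale);  // 10000

An explicit per-queue maximum-applications value then overrides both, which is what the block after the global-cap assertions goes on to verify.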
+ + assertEquals((int) (csConf.getGlobalMaximumApplicationsPerQueue() * + queueAScale), a.getMaxApplications()); + assertEquals(a.getMaxApplications(), a.getMaxApplicationsPerUser()); + assertEquals((int) (csConf.getGlobalMaximumApplicationsPerQueue() * + queueBScale), b.getMaxApplications()); + assertEquals(b.getMaxApplications(), b.getMaxApplicationsPerUser()); + + // Set MaximumApplicationsPerQueue in csConf + int queueAMaxApplications = 30000; + int queueBMaxApplications = 30000; + csConf.set("yarn.scheduler.capacity." + Q_A + ".maximum-applications", + Integer.toString(queueAMaxApplications)); + csConf.set("yarn.scheduler.capacity." + Q_B + ".maximum-applications", + Integer.toString(queueBMaxApplications)); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); + + assertEquals(queueAMaxApplications, a.getMaxApplications()); + assertEquals(a.getMaxApplications(), a.getMaxApplicationsPerUser()); + assertEquals(queueBMaxApplications, b.getMaxApplications()); + assertEquals(b.getMaxApplications(), b.getMaxApplicationsPerUser()); + + // Extra cases for testing maxApplicationsPerUser + int halfPercent = 50; + int oneAndQuarterPercent = 125; + a.getUsersManager().setUserLimit(halfPercent); + b.getUsersManager().setUserLimit(oneAndQuarterPercent); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); + + assertEquals(a.getMaxApplications() * halfPercent / 100, + a.getMaxApplicationsPerUser()); + // Q_B's limit per user shouldn't be greater + // than the whole queue's application limit + assertEquals(b.getMaxApplications(), b.getMaxApplicationsPerUser()); + + float userLimitFactorQueueA = 0.9f; + float userLimitFactorQueueB = 1.1f; + a.getUsersManager().setUserLimit(halfPercent); + a.getUsersManager().setUserLimitFactor(userLimitFactorQueueA); + b.getUsersManager().setUserLimit(100); + b.getUsersManager().setUserLimitFactor(userLimitFactorQueueB); + root.updateClusterResource(clusterResource, + new ResourceLimits(clusterResource)); + + assertEquals((int) (a.getMaxApplications() * halfPercent * + userLimitFactorQueueA / 100), a.getMaxApplicationsPerUser()); + // Q_B's limit per user shouldn't be greater + // than the whole queue's application limit + assertEquals(b.getMaxApplications(), b.getMaxApplicationsPerUser()); + + } + @After public void tearDown() throws Exception { } - + private ResourceLimits anyResourceLimits() { return any(ResourceLimits.class); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java index 2e7009eae65c0..039b9da8aaa52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java @@ -100,6 +100,33 @@ public void testQueueMappingTrimSpaces() throws IOException { .build()); } + @Test + public void testQueueMappingPathParsing() { + QueueMapping leafOnly = QueueMapping.QueueMappingBuilder.create() + .parsePathString("leaf") + .build(); + + 
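The new testQueueMappingPathParsing that starts above pins down the path-splitting contract: the token after the last dot is the leaf queue, everything before it is the parent path, and a path without a dot has no parent. The sketch below illustrates that contract in isolation; it is not the actual QueueMappingBuilder.parsePathString implementation.

    // Illustrative only: the split the assertions below expect.
    static String[] splitQueuePath(String path) {
      int lastDot = path.lastIndexOf('.');
      String parent = (lastDot < 0) ? null : path.substring(0, lastDot);
      String leaf = (lastDot < 0) ? path : path.substring(lastDot + 1);
      return new String[] {parent, leaf};
    }

    // splitQueuePath("leaf")                -> {null, "leaf"}
    // splitQueuePath("root.leaf")           -> {"root", "leaf"}
    // splitQueuePath("root.a.b.c.d.e.leaf") -> {"root.a.b.c.d.e", "leaf"}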
Assert.assertEquals("leaf", leafOnly.getQueue()); + Assert.assertEquals(null, leafOnly.getParentQueue()); + Assert.assertEquals("leaf", leafOnly.getFullPath()); + + QueueMapping twoLevels = QueueMapping.QueueMappingBuilder.create() + .parsePathString("root.leaf") + .build(); + + Assert.assertEquals("leaf", twoLevels.getQueue()); + Assert.assertEquals("root", twoLevels.getParentQueue()); + Assert.assertEquals("root.leaf", twoLevels.getFullPath()); + + QueueMapping deep = QueueMapping.QueueMappingBuilder.create() + .parsePathString("root.a.b.c.d.e.leaf") + .build(); + + Assert.assertEquals("leaf", deep.getQueue()); + Assert.assertEquals("root.a.b.c.d.e", deep.getParentQueue()); + Assert.assertEquals("root.a.b.c.d.e.leaf", deep.getFullPath()); + } + @Test (timeout = 60000) public void testQueueMappingParsingInvalidCases() throws Exception { // configuration parsing tests - negative test cases diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java index 093cea59ded52..c1f48be96a3ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java @@ -43,7 +43,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; public class TestQueueParsing { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java index 9997335f20813..aa3b5919fcc60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java @@ -157,6 +157,23 @@ public void testQueueStateTransit() throws Exception { Assert.assertEquals(QueueState.DRAINING, cs.getQueue(Q2).getState()); Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q3).getState()); + // set Q2 state to RUNNING and do reinitialize. + // Q2 should transit from DRAINING to RUNNING + csConf.setState(Q2_PATH, QueueState.RUNNING); + conf = new YarnConfiguration(csConf); + cs.reinitialize(conf, rmContext); + Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q1).getState()); + Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q2).getState()); + Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q3).getState()); + + // set Q2 state to stop and do reinitialize. 
+ csConf.setState(Q2_PATH, QueueState.STOPPED); + conf = new YarnConfiguration(csConf); + cs.reinitialize(conf, rmContext); + Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q1).getState()); + Assert.assertEquals(QueueState.DRAINING, cs.getQueue(Q2).getState()); + Assert.assertEquals(QueueState.RUNNING, cs.getQueue(Q3).getState()); + // set Q1 state to stop and do reinitialize. csConf.setState(Q1_PATH, QueueState.STOPPED); conf = new YarnConfiguration(csConf); @@ -201,6 +218,7 @@ private FiCaSchedulerApp getMockApplication(ApplicationId appId, String user, CommonNodeLabelsManager.NO_LABEL); when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))) .thenCallRealMethod(); + when(application.isRunnable()).thenReturn(true); return application; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java index a4c1300df37f6..e893717a8dd0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueStateManager.java @@ -157,6 +157,7 @@ private FiCaSchedulerApp getMockApplication(ApplicationId appId, String user, CommonNodeLabelsManager.NO_LABEL); when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))) .thenCallRealMethod(); + when(application.isRunnable()).thenReturn(true); return application; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index 491541c64c74d..1168f648024b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -31,7 +31,7 @@ import java.util.Collections; import java.util.Map; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.test.GenericTestUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,7 +105,6 @@ public void setUp() throws Exception { CapacityScheduler spyCs = new CapacityScheduler(); cs = spy(spyCs); rmContext = TestUtils.getMockRMContext(); - } private void setup(CapacitySchedulerConfiguration csConf) throws Exception { @@ -114,6 +113,9 @@ private void setup(CapacitySchedulerConfiguration csConf) throws Exception { private void setup(CapacitySchedulerConfiguration csConf, boolean addUserLimits) throws Exception { + //All stub calls on the spy object of the 'cs' field should happen + //before cs.start() 
is invoked. See YARN-10672 for more details. + when(cs.getNumClusterNodes()).thenReturn(3); csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true); final String newRoot = "root" + System.currentTimeMillis(); @@ -156,8 +158,6 @@ private void setup(CapacitySchedulerConfiguration csConf, cs.setRMContext(spyRMContext); cs.init(csConf); cs.start(); - - when(cs.getNumClusterNodes()).thenReturn(3); } private static final String A = "a"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java index 4615e5157ed48..a4248c5dae584 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.records.ExecutionType; @@ -862,7 +862,7 @@ public void testInterAppConstraintsWithNamespaces() throws Exception { try { rm.start(); - MockNM nm1 = rm.registerNode("192.168.0.1:1234:", 100*GB, 100); + MockNM nm1 = rm.registerNode("192.168.0.1:1234", 100*GB, 100); MockNM nm2 = rm.registerNode("192.168.0.2:1234", 100*GB, 100); MockNM nm3 = rm.registerNode("192.168.0.3:1234", 100*GB, 100); MockNM nm4 = rm.registerNode("192.168.0.4:1234", 100*GB, 100); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java index 785d244139e39..45a427b380b19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.Priority; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 44b445156dde9..357b19f53b28c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java index 8c2856d9df8fb..1259f385bfa83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java @@ -50,9 +50,9 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestWorkPreservingRMRestartForNodeLabel { private Configuration conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java index 3a8c362812c85..2c363622d2157 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java @@ -78,13 +78,24 @@ public void testNullConfigurationUpdate() throws 
Exception { confStore.close(); } - YarnConfigurationStore.LogMutation prepareLogMutation(String key, - String value) + YarnConfigurationStore.LogMutation prepareLogMutation(String... values) throws Exception { - Map update = new HashMap<>(); - update.put(key, value); + Map updates = new HashMap<>(); + String key; + String value; + + if (values.length % 2 != 0) { + throw new IllegalArgumentException("The number of parameters should be " + + "even."); + } + + for (int i = 1; i <= values.length; i += 2) { + key = values[i - 1]; + value = values[i]; + updates.put(key, value); + } YarnConfigurationStore.LogMutation mutation = - new YarnConfigurationStore.LogMutation(update, TEST_USER); + new YarnConfigurationStore.LogMutation(updates, TEST_USER); confStore.logMutation(mutation); return mutation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/PersistentConfigurationStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/PersistentConfigurationStoreBaseTest.java index 169c36dce993f..5037921b18655 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/PersistentConfigurationStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/PersistentConfigurationStoreBaseTest.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; +import static org.junit.Assume.assumeFalse; /** * Base class for the persistent {@link YarnConfigurationStore} @@ -94,6 +95,9 @@ public void testVersion() throws Exception { @Test public void testMaxLogs() throws Exception { + assumeFalse("test should be skipped for TestFSSchedulerConfigurationStore", + this instanceof TestFSSchedulerConfigurationStore); + conf.setLong(YarnConfiguration.RM_SCHEDCONF_MAX_LOGS, 2); confStore.initialize(conf, schedConf, rmContext); LinkedList logs = confStore.getLogs(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java index f5da5acb66b36..5897741ca9322 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java @@ -20,7 +20,6 @@ import java.io.File; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import org.apache.commons.io.FileUtils; @@ -31,36 +30,34 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation; +import org.apache.hadoop.yarn.server.records.Version; import org.hamcrest.CoreMatchers; import org.junit.After; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; import static org.junit.Assert.assertThat; /** * Tests {@link FSSchedulerConfigurationStore}. */ -public class TestFSSchedulerConfigurationStore { - private static final String TEST_USER = "test"; - private FSSchedulerConfigurationStore configurationStore; - private Configuration conf; +public class TestFSSchedulerConfigurationStore extends + PersistentConfigurationStoreBaseTest { private File testSchedulerConfigurationDir; @Before + @Override public void setUp() throws Exception { - configurationStore = new FSSchedulerConfigurationStore(); + super.setUp(); testSchedulerConfigurationDir = new File( TestFSSchedulerConfigurationStore.class.getResource("").getPath() + FSSchedulerConfigurationStore.class.getSimpleName()); testSchedulerConfigurationDir.mkdirs(); - conf = new Configuration(); conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_FS_PATH, testSchedulerConfigurationDir.getAbsolutePath()); } @@ -81,34 +78,41 @@ public void tearDown() throws Exception { FileUtils.deleteDirectory(testSchedulerConfigurationDir); } + @Test + public void checkVersion() { + try { + confStore.checkVersion(); + } catch (Exception e) { + fail("checkVersion throw exception"); + } + } + @Test public void confirmMutationWithValid() throws Exception { conf.setInt( - YarnConfiguration.SCHEDULER_CONFIGURATION_FS_MAX_VERSION, 2); + YarnConfiguration.SCHEDULER_CONFIGURATION_FS_MAX_VERSION, 2); conf.set("a", "a"); conf.set("b", "b"); conf.set("c", "c"); writeConf(conf); - configurationStore.initialize(conf, conf, null); - Configuration storeConf = configurationStore.retrieve(); + confStore.initialize(conf, conf, null); + Configuration storeConf = confStore.retrieve(); compareConfig(conf, storeConf); Configuration expectConfig = new Configuration(conf); expectConfig.unset("a"); expectConfig.set("b", "bb"); - prepareParameterizedLogMutation(configurationStore, true, - "a", null, "b", "bb"); - storeConf = configurationStore.retrieve(); + confStore.confirmMutation(prepareLogMutation("a", null, "b", "bb"), true); + storeConf = confStore.retrieve(); assertNull(storeConf.get("a")); assertEquals("bb", storeConf.get("b")); assertEquals("c", storeConf.get("c")); compareConfig(expectConfig, storeConf); - prepareParameterizedLogMutation(configurationStore, true, - "a", null, "b", "bbb"); - storeConf = configurationStore.retrieve(); + confStore.confirmMutation(prepareLogMutation("a", null, "b", "bbb"), true); + storeConf = confStore.retrieve(); assertNull(storeConf.get("a")); assertEquals("bbb", storeConf.get("b")); assertEquals("c", storeConf.get("c")); @@ -120,17 +124,51 @@ public void confirmMutationWithInvalid() throws Exception { conf.set("b", "b"); conf.set("c", "c"); writeConf(conf); - configurationStore.initialize(conf, conf, null); - Configuration storeConf = configurationStore.retrieve(); + confStore.initialize(conf, conf, null); + Configuration storeConf = confStore.retrieve(); compareConfig(conf, storeConf); - prepareParameterizedLogMutation(configurationStore, false, - "a", null, "b", "bb"); - storeConf = configurationStore.retrieve(); + 
confStore.confirmMutation(prepareLogMutation("a", null, "b", "bb"), false); + storeConf = confStore.retrieve(); compareConfig(conf, storeConf); } + @Test + public void testConfigRetrieval() throws Exception { + Configuration schedulerConf = new Configuration(); + schedulerConf.set("a", "a"); + schedulerConf.setLong("long", 1L); + schedulerConf.setBoolean("boolean", true); + writeConf(schedulerConf); + + confStore.initialize(conf, conf, null); + Configuration storedConfig = confStore.retrieve(); + + compareConfig(schedulerConf, storedConfig); + } + + @Test + public void testFormatConfiguration() throws Exception { + Configuration persistedSchedConf = new Configuration(); + persistedSchedConf.set("a", "a"); + writeConf(persistedSchedConf); + confStore.initialize(conf, conf, null); + Configuration storedConfig = confStore.retrieve(); + assertEquals("Retrieved config should match the stored one", "a", + storedConfig.get("a")); + confStore.format(); + try { + confStore.retrieve(); + fail("Expected an IOException with message containing \"no capacity " + + "scheduler file in\" to be thrown"); + } catch (IOException e) { + assertThat("Exception message should contain the predefined string.", + e.getMessage(), + CoreMatchers.containsString("no capacity scheduler file in")); + } + } + @Test public void testFileSystemClose() throws Exception { MiniDFSCluster hdfsCluster = null; @@ -146,18 +184,15 @@ public void testFileSystemClose() throws Exception { fs.mkdirs(path); } - FSSchedulerConfigurationStore configStore = - new FSSchedulerConfigurationStore(); hdfsConfig.set(YarnConfiguration.SCHEDULER_CONFIGURATION_FS_PATH, path.toString()); - configStore.initialize(hdfsConfig, hdfsConfig, null); + confStore.initialize(hdfsConfig, hdfsConfig, null); // Close the FileSystem object and validate fs.close(); try { - prepareParameterizedLogMutation(configStore, true, - "testkey", "testvalue"); + confStore.confirmMutation(prepareLogMutation("key", "val"), true); } catch (IOException e) { if (e.getMessage().contains("Filesystem closed")) { fail("FSSchedulerConfigurationStore failed to handle " + @@ -176,50 +211,8 @@ public void testFileSystemClose() throws Exception { } } - @Test - public void testFormatConfiguration() throws Exception { - Configuration schedulerConf = new Configuration(); - schedulerConf.set("a", "a"); - writeConf(schedulerConf); - configurationStore.initialize(conf, conf, null); - Configuration storedConfig = configurationStore.retrieve(); - assertEquals("a", storedConfig.get("a")); - configurationStore.format(); - try { - configurationStore.retrieve(); - fail("Expected an IOException with message containing \"no capacity " + - "scheduler file in\" to be thrown"); - } catch (IOException e) { - assertThat(e.getMessage(), - CoreMatchers.containsString("no capacity scheduler file in")); - } - } - - @Test - public void retrieve() throws Exception { - Configuration schedulerConf = new Configuration(); - schedulerConf.set("a", "a"); - schedulerConf.setLong("long", 1L); - schedulerConf.setBoolean("boolean", true); - writeConf(schedulerConf); - - configurationStore.initialize(conf, conf, null); - Configuration storedConfig = configurationStore.retrieve(); - - compareConfig(schedulerConf, storedConfig); - } - - @Test - public void checkVersion() { - try { - configurationStore.checkVersion(); - } catch (Exception e) { - fail("checkVersion throw exception"); - } - } - private void compareConfig(Configuration schedulerConf, - Configuration storedConfig) { + Configuration storedConfig) { for (Map.Entry 
entry : schedulerConf) { assertEquals(entry.getKey(), schedulerConf.get(entry.getKey()), storedConfig.get(entry.getKey())); @@ -231,26 +224,13 @@ private void compareConfig(Configuration schedulerConf, } } - private void prepareParameterizedLogMutation( - FSSchedulerConfigurationStore configStore, - boolean validityFlag, String... values) throws Exception { - Map updates = new HashMap<>(); - String key; - String value; - - if (values.length % 2 != 0) { - throw new IllegalArgumentException("The number of parameters should be " + - "even."); - } - - for (int i = 1; i <= values.length; i += 2) { - key = values[i - 1]; - value = values[i]; - updates.put(key, value); - } + @Override + public YarnConfigurationStore createConfStore() { + return new FSSchedulerConfigurationStore(); + } - LogMutation logMutation = new LogMutation(updates, TEST_USER); - configStore.logMutation(logMutation); - configStore.confirmMutation(logMutation, validityFlag); + @Override + Version getVersion() { + return null; } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java index 519b61156ebf2..5d68482929edf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.ImmutableTable; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableTable; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java index 9095ac1291c73..09e2086db91f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint; -import 
com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java index 27a121a833e34..19cb24d1ac511 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsNamespace.java @@ -15,8 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.junit.Assert; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java index 976906d2a333d..5951e190b88c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java @@ -34,7 +34,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.SchedulingRequest; import org.apache.hadoop.yarn.api.resource.PlacementConstraint; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java index b9c192146d4dd..7821bc5567104 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java @@ -43,7 +43,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -71,7 +71,7 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.mockito.Mockito; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java index 2ac5c3dc28c40..5ce3130c5e08e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java index 818fcc9e38622..274152f47afbf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java index f7648c86d4bb5..9c916e36418bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SimpleGroupsMapping.java @@ -25,10 +25,17 @@ import org.apache.hadoop.security.GroupMappingServiceProvider; public class SimpleGroupsMapping implements GroupMappingServiceProvider { - + @Override public List getGroups(String user) { - return Arrays.asList(user + "group", user + "subgroup1", user + "subgroup2"); + if ("admins".equals(user)) { + return Arrays.asList("root"); + } else if ("nosecondarygroupuser".equals(user)) { + return Arrays.asList("primarygrouponly"); + } else { + return Arrays.asList( + user + "group", user + "subgroup1", user + "subgroup2"); + } } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java index 0650027b8dd5e..9fb76cf261ea3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java @@ -736,6 +736,72 @@ public void testParentWithReservation() throws Exception { } } + /** + * Verify that a parent queue (type = parent) cannot have a maxAMShare element + * as dynamic queues won't be able to inherit this setting. + */ + @Test + public void testParentTagWithMaxAMShare() throws Exception { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + AllocationFileWriter.create() + .addQueue(new AllocationFileQueue.Builder("parent") + .parent(true) + .maxAMShare(0.75) + .build()) + .writeToFile(ALLOC_FILE); + + AllocationFileLoaderService allocLoader = + new AllocationFileLoaderService(scheduler); + allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); + try { + allocLoader.reloadAllocations(); + fail("Expect allocation parsing to fail as maxAMShare cannot be set for" + + " a parent queue."); + } catch (AllocationConfigurationException ex) { + assertEquals(ex.getMessage(), "The configuration settings for root.parent" + + " are invalid. 
A queue element that contains child queue elements" + + " or that has the type='parent' attribute cannot also include a" + + " maxAMShare element."); + } + } + + /** + * Verify that a parent queue that is not explicitly tagged with "type" + * as "parent" but has a child queue (implicit parent) cannot have a + * maxAMShare element. + */ + @Test + public void testParentWithMaxAMShare() throws Exception { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + AllocationFileWriter.create() + .addQueue(new AllocationFileQueue.Builder("parent") + .parent(false) + .maxAMShare(0.76) + .subQueue(new AllocationFileQueue.Builder("child").build()) + .build()) + .writeToFile(ALLOC_FILE); + + AllocationFileLoaderService allocLoader = + new AllocationFileLoaderService(scheduler); + allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); + try { + allocLoader.reloadAllocations(); + fail("Expect allocation parsing to fail as maxAMShare cannot be set for" + + " a parent queue."); + } catch (AllocationConfigurationException ex) { + assertEquals(ex.getMessage(), "The configuration settings for root.parent" + + " are invalid. A queue element that contains child queue elements" + + " or that has the type='parent' attribute cannot also include a" + + " maxAMShare element."); + } + } + @Test public void testParentTagWithChild() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index 443c7963cc932..0d1f6294d8701 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -18,8 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -35,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ClusterNodeTracker; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; @@ -86,6 +88,9 @@ public Configuration createConfiguration() { @SuppressWarnings("deprecation") @Before public void setup() { + QueueMetrics.clearQueueMetrics(); + DefaultMetricsSystem.setMiniClusterMode(true); + 
mockClock = new ControlledClock(); conf = createConfiguration(); resourceManager = new MockRM(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java index 6ecf80996d830..5af9bc3acdfef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java @@ -33,16 +33,16 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsCustomResource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; @@ -344,7 +344,7 @@ private void verifyAMShare(FSLeafQueue schedulable, private Map verifyQueueMetricsForCustomResources( FSLeafQueue schedulable) { - QueueMetricsCustomResource maxAMShareCustomResources = + CustomResourceMetricValue maxAMShareCustomResources = schedulable.getMetrics().getCustomResources().getMaxAMShare(); Map customResourceValues = maxAMShareCustomResources .getValues(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java index 253e00359d10c..15e2d02421725 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.MetricsSource; import 
org.apache.hadoop.metrics2.MetricsSystem; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 2e043fb048128..b61f5ba03ebe6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol; @@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -124,6 +125,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES; import static org.junit.Assert.assertEquals; @@ -144,6 +146,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { @Before public void setUp() throws IOException { + DefaultMetricsSystem.setMiniClusterMode(true); scheduler = new FairScheduler(); conf = createConfiguration(); resourceManager = new MockRM(conf); @@ -4862,9 +4865,12 @@ public void testUserAsDefaultQueueWithLeadingTrailingSpaceUserName() @Test public void testRemovedNodeDecomissioningNode() throws Exception { + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodemanager NodeManager nm = registerNode("host_decom", 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -4907,10 +4913,14 @@ public void handle(Event event) { ((FairScheduler) resourceManager.getResourceScheduler()) .setRMContext(spyContext); ((AsyncDispatcher) mockDispatcher).start(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm_0.getNodeId()); @@ -4949,11 
+4959,13 @@ public void handle(Event event) { } private NodeManager registerNode(String hostName, int containerManagerPort, - int httpPort, String rackName, - Resource capability) + int httpPort, String rackName, + Resource capability, NodeStatus nodeStatus) throws IOException, YarnException { + NodeStatus mockNodeStatus = createMockNodeStatus(); + NodeManager nm = new NodeManager(hostName, containerManagerPort, httpPort, - rackName, capability, resourceManager); + rackName, capability, resourceManager, mockNodeStatus); // after YARN-5375, scheduler event is processed in rm main dispatcher, // wait it processed, or may lead dead lock diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java index 522d3e5eac478..2ec1af363b1ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; -import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.Resource; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java index a521b3a3c92c3..1bad2b0e060b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java @@ -35,8 +35,8 @@ import org.junit.Test; import org.mockito.Mockito; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** * Test the {@link FairScheduler} queue manager correct queue hierarchies diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java index ebf0616b2e640..c37e9a840d286 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueue.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java index 48aab06ef34a1..fa878c7715507 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementPolicy.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java index 93118d311849b..2ebb56433a7b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocationfile/AllocationFileQueuePlacementRule.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java index 
9f41d3147949d..addf95fc70f2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigArgumentHandler.java @@ -45,7 +45,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Unit tests for FSConfigToCSConfigArgumentHandler. @@ -651,4 +651,35 @@ public void testValidationSkippedWhenOutputIsConsole() throws Exception { verifyZeroInteractions(mockValidator); } + + @Test + public void testEnabledAsyncScheduling() throws Exception { + setupFSConfigConversionFiles(true); + + FSConfigToCSConfigArgumentHandler argumentHandler = + new FSConfigToCSConfigArgumentHandler(conversionOptions, mockValidator); + + String[] args = getArgumentsAsArrayWithDefaults("-f", + FSConfigConverterTestCommons.FS_ALLOC_FILE, "-p", + "-a"); + argumentHandler.parseAndConvert(args); + + assertTrue("-a switch had no effect", + conversionOptions.isEnableAsyncScheduler()); + } + + @Test + public void testDisabledAsyncScheduling() throws Exception { + setupFSConfigConversionFiles(true); + + FSConfigToCSConfigArgumentHandler argumentHandler = + new FSConfigToCSConfigArgumentHandler(conversionOptions, mockValidator); + + String[] args = getArgumentsAsArrayWithDefaults("-f", + FSConfigConverterTestCommons.FS_ALLOC_FILE, "-p"); + argumentHandler.parseAndConvert(args); + + assertFalse("-a switch wasn't provided but async scheduling option is true", + conversionOptions.isEnableAsyncScheduler()); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java index 4ceb8c857250e..141a4f88c5a0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigConverter.java @@ -155,17 +155,7 @@ private void createConverter() { .withOutputDirectory(FSConfigConverterTestCommons.OUTPUT_DIR); } - @Test - public void testDefaultMaxApplications() throws Exception { - converter.convert(config); - - Configuration conf = converter.getCapacitySchedulerConfig(); - int maxApps = - conf.getInt( - CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, -1); - assertEquals("Default max apps", 15, maxApps); - } @Test public void testDefaultMaxAMShare() throws Exception { @@ -252,57 +242,73 @@ public void testConvertACLs() throws Exception { } @Test - public void testDefaultMaxRunningApps() throws Exception { + public void testDefaultQueueMaxParallelApps() throws Exception { converter.convert(config); Configuration conf = 
converter.getCapacitySchedulerConfig(); - // default setting - assertEquals("Default max apps", 15, - conf.getInt(PREFIX + "maximum-applications", -1)); + assertEquals("Default max parallel apps", 15, + conf.getInt(PREFIX + "max-parallel-apps", -1)); } @Test - public void testQueueMaxChildCapacityNotSupported() throws Exception { - expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("test"); + public void testSpecificQueueMaxParallelApps() throws Exception { + converter.convert(config); - Mockito.doThrow(new UnsupportedPropertyException("test")) - .when(ruleHandler).handleMaxChildCapacity(); + Configuration conf = converter.getCapacitySchedulerConfig(); - converter.convert(config); + assertEquals("root.admins.alice max parallel apps", 2, + conf.getInt(PREFIX + "root.admins.alice.max-parallel-apps", -1)); } @Test - public void testReservationSystemNotSupported() throws Exception { - expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("maxCapacity"); + public void testDefaultUserMaxParallelApps() throws Exception { + converter.convert(config); - Mockito.doThrow(new UnsupportedPropertyException("maxCapacity")) - .when(ruleHandler).handleMaxChildCapacity(); - config.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true); + Configuration conf = converter.getCapacitySchedulerConfig(); + int userMaxParallelApps = + conf.getInt( + PREFIX + "user.max-parallel-apps", -1); + assertEquals("Default user max parallel apps", 10, + userMaxParallelApps); + } + + @Test + public void testSpecificUserMaxParallelApps() throws Exception { converter.convert(config); + + Configuration conf = converter.getCapacitySchedulerConfig(); + + assertEquals("Max parallel apps for alice", 30, + conf.getInt(PREFIX + "user.alice.max-parallel-apps", -1)); + assertNull("Max parallel apps should be undefined for user bob", + conf.get(PREFIX + "user.bob.max-parallel-apps")); + assertNull("Max parallel apps should be undefined for user joe", + conf.get(PREFIX + "user.joe.max-parallel-apps")); + assertNull("Max parallel apps should be undefined for user john", + conf.get(PREFIX + "user.john.max-parallel-apps")); } @Test - public void testUserMaxAppsNotSupported() throws Exception { + public void testQueueMaxChildCapacityNotSupported() throws Exception { expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("userMaxApps"); + expectedException.expectMessage("test"); - Mockito.doThrow(new UnsupportedPropertyException("userMaxApps")) - .when(ruleHandler).handleUserMaxApps(); + Mockito.doThrow(new UnsupportedPropertyException("test")) + .when(ruleHandler).handleMaxChildCapacity(); converter.convert(config); } @Test - public void testUserMaxAppsDefaultNotSupported() throws Exception { + public void testReservationSystemNotSupported() throws Exception { expectedException.expect(UnsupportedPropertyException.class); - expectedException.expectMessage("userMaxAppsDefault"); + expectedException.expectMessage("maxCapacity"); - Mockito.doThrow(new UnsupportedPropertyException("userMaxAppsDefault")) - .when(ruleHandler).handleUserMaxAppsDefault(); + Mockito.doThrow(new UnsupportedPropertyException("maxCapacity")) + .when(ruleHandler).handleMaxChildCapacity(); + config.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true); converter.convert(config); } @@ -649,6 +655,7 @@ public void testAutoCreateChildQueuesFalseWithoutPlacementRules() testAutoCreateChildQueuesWithoutPlacementRules(false); } + 
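The max-parallel-apps tests above encode the new FS-to-CS mapping that replaced the removed maximum-applications checks. A small illustrative sketch of the capacity-scheduler keys involved, using the exact values the assertions expect; the PREFIX value and the per-key fair-scheduler sources are assumptions, since the converter implementation is not part of this hunk:

import org.apache.hadoop.conf.Configuration;

/** Illustrative only: the keys and values the converter tests above assert on. */
public class MaxParallelAppsKeysSketch {
  // Assumed to match the PREFIX constant used by the test class.
  private static final String PREFIX = "yarn.scheduler.capacity.";

  public static void main(String[] args) {
    Configuration cs = new Configuration(false);
    // Fair-scheduler sources noted per key are assumptions.
    cs.setInt(PREFIX + "max-parallel-apps", 15);                  // queueMaxAppsDefault (assumed)
    cs.setInt(PREFIX + "root.admins.alice.max-parallel-apps", 2); // per-queue maxRunningApps (assumed)
    cs.setInt(PREFIX + "user.max-parallel-apps", 10);             // userMaxAppsDefault (assumed)
    cs.setInt(PREFIX + "user.alice.max-parallel-apps", 30);       // per-user maxRunningApps (assumed)

    // Reading a key back the same way the tests do:
    System.out.println(cs.getInt(PREFIX + "user.alice.max-parallel-apps", -1)); // prints 30
  }
}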
@SuppressWarnings("checkstyle:linelength") private void testAutoCreateChildQueuesWithoutPlacementRules( boolean allowUndeclaredPools) throws Exception { config = new Configuration(false); @@ -661,15 +668,27 @@ private void testAutoCreateChildQueuesWithoutPlacementRules( converter.convert(config); Configuration convertedConf = converter.getCapacitySchedulerConfig(); - String property = + String rootUserAutoCreate = + "yarn.scheduler.capacity.root.users.auto-create-child-queue.enabled"; + String rootAutoCreate = "yarn.scheduler.capacity.root.auto-create-child-queue.enabled"; + String leafQueueAutoCreate = + "yarn.scheduler.capacity.root.users.joe.auto-create-child-queue.enabled"; if (allowUndeclaredPools) { - assertEquals("Auto-create queue wasn't enabled", true, - convertedConf.getBoolean(property, false)); + assertEquals("Auto-create queue wasn't enabled for root.users", true, + convertedConf.getBoolean(rootUserAutoCreate, false)); + assertNull("Auto-create queue shouldn't be set for root", + convertedConf.get(rootAutoCreate)); + assertNull("Auto-create queue shouldn't be set for leaf", + convertedConf.get(leafQueueAutoCreate)); } else { - assertNull("Auto-create queue shouldn't be set", - convertedConf.get(property)); + assertNull("Auto-create queue shouldn't be set for root.users", + convertedConf.get(rootUserAutoCreate)); + assertNull("Auto-create queue shouldn't be set for root", + convertedConf.get(rootAutoCreate)); + assertNull("Auto-create queue shouldn't be set for leaf", + convertedConf.get(leafQueueAutoCreate)); } } @@ -704,6 +723,41 @@ public void testPlacementRulesConversionEnabled() throws Exception { any(Boolean.class)); } + @Test + public void testConversionWhenAsyncSchedulingIsEnabled() + throws Exception { + boolean schedulingEnabledValue = testConversionWithAsyncSchedulingOption(true); + assertTrue("Asynchronous scheduling should be true", schedulingEnabledValue); + } + + @Test + public void testConversionWhenAsyncSchedulingIsDisabled() throws Exception { + boolean schedulingEnabledValue = testConversionWithAsyncSchedulingOption(false); + assertEquals("Asynchronous scheduling should be the default value", + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE, + schedulingEnabledValue); + } + + private boolean testConversionWithAsyncSchedulingOption(boolean enabled) throws Exception { + FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder() + .withClusterResource(CLUSTER_RESOURCE_STRING) + .withFairSchedulerXmlConfig(FAIR_SCHEDULER_XML) + .build(); + + ConversionOptions conversionOptions = createDefaultConversionOptions(); + conversionOptions.setEnableAsyncScheduler(enabled); + + converter = new FSConfigToCSConfigConverter(ruleHandler, + conversionOptions); + + converter.convert(params); + + Configuration convertedConfig = converter.getYarnSiteConfig(); + + return convertedConfig.getBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE); + } + private Configuration getConvertedCSConfig(String dir) throws IOException { File capacityFile = new File(dir, "capacity-scheduler.xml"); ByteArrayInputStream input = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java index b563e64364915..d1eee29118142 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSConfigToCSConfigRuleHandler.java @@ -76,8 +76,6 @@ public void testInitPropertyActionsToWarning() throws IOException { ruleHandler.handleQueueAutoCreate("test"); ruleHandler.handleReservationSystem(); ruleHandler.handleSpecifiedNotFirstRule(); - ruleHandler.handleUserMaxApps(); - ruleHandler.handleUserMaxAppsDefault(); } @Test @@ -106,8 +104,6 @@ public void testAllRulesWarning() throws IOException { ruleHandler.handleQueueAutoCreate("test"); ruleHandler.handleReservationSystem(); ruleHandler.handleSpecifiedNotFirstRule(); - ruleHandler.handleUserMaxApps(); - ruleHandler.handleUserMaxAppsDefault(); } @Test @@ -140,8 +136,6 @@ public void testAllRulesAbort() throws IOException { expectAbort(() -> ruleHandler.handleQueueAutoCreate("test")); expectAbort(() -> ruleHandler.handleReservationSystem()); expectAbort(() -> ruleHandler.handleSpecifiedNotFirstRule()); - expectAbort(() -> ruleHandler.handleUserMaxApps()); - expectAbort(() -> ruleHandler.handleUserMaxAppsDefault()); expectAbort(() -> ruleHandler.handleFairAsDrf("test")); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java index 41018623e890f..c8b2fa3c81fb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSQueueConverter.java @@ -47,7 +47,7 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; /** @@ -195,18 +195,18 @@ public void testQueueMaxAMShare() { } @Test - public void testQueueMaxRunningApps() { + public void testQueueMaxParallelApps() { converter = builder.build(); converter.convertQueueHierarchy(rootQueue); assertEquals("root.admins.alice max apps", 2, - csConfig.getInt(PREFIX + "root.admins.alice.maximum-applications", + csConfig.getInt(PREFIX + "root.admins.alice.max-parallel-apps", -1)); Set remaining = Sets.difference(ALL_QUEUES, Sets.newHashSet("root.admins.alice")); - assertNoValueForQueues(remaining, ".maximum-applications", csConfig); + assertNoValueForQueues(remaining, ".max-parallel-apps", csConfig); } @Test @@ -316,7 +316,7 @@ public void testQueueAutoCreateChildQueue() { converter.convertQueueHierarchy(rootQueue); - Set parentQueues = Sets.newHashSet("root", 
+ Set parentQueues = Sets.newHashSet( "root.admins", "root.users"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java index 0bd8fb1dbbb9f..e29f53e5e1ce0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestFSYarnSiteConverter.java @@ -26,6 +26,7 @@ import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; /** @@ -52,7 +53,8 @@ public void testSiteContinuousSchedulingConversion() { yarnConfig.setInt( FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, 666); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertTrue("Cont. scheduling", yarnConvertedConfig.getBoolean( CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false)); @@ -67,7 +69,7 @@ public void testSiteMinimumAllocationIncrementConversion() { yarnConfig.setInt("yarn.resource-types.memory-mb.increment-allocation", 11); yarnConfig.setInt("yarn.resource-types.vcores.increment-allocation", 5); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, false); assertEquals("Memory alloc increment", 11, yarnConvertedConfig.getInt("yarn.scheduler.minimum-allocation-mb", @@ -85,7 +87,8 @@ public void testSitePreemptionConversion() { FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS, 321); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertTrue("Preemption enabled", yarnConvertedConfig.getBoolean( @@ -105,7 +108,8 @@ public void testSitePreemptionConversion() { public void testSiteAssignMultipleConversion() { yarnConfig.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertTrue("Assign multiple", yarnConvertedConfig.getBoolean( @@ -117,7 +121,8 @@ public void testSiteAssignMultipleConversion() { public void testSiteMaxAssignConversion() { yarnConfig.setInt(FairSchedulerConfiguration.MAX_ASSIGN, 111); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertEquals("Max assign", 111, yarnConvertedConfig.getInt( @@ -131,7 +136,8 @@ public void testSiteLocalityThresholdConversion() { yarnConfig.set(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, "321.321"); - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); 
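The convertSiteProperties calls in the TestFSYarnSiteConverter hunks gain a fourth boolean for asynchronous scheduling. A minimal sketch of what such a flag plausibly does on the capacity-scheduler side; the method body is an assumption, and only the property constant and the four-argument call shape come from this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

/** Sketch only; not the real FSYarnSiteConverter, whose source is not in this diff. */
public class AsyncFlagConversionSketch {
  static void convertSiteProperties(Configuration fsSite, Configuration csSite,
      boolean drfUsed, boolean enableAsyncScheduler) {
    if (enableAsyncScheduler) {
      // Toggle the same constant the assertions in the tests read back.
      csSite.setBoolean(
          CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    }
    // drfUsed and the remaining FS -> CS site mappings are omitted in this sketch.
  }
}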
assertEquals("Locality threshold node", "123.123", yarnConvertedConfig.get( @@ -143,7 +149,8 @@ public void testSiteLocalityThresholdConversion() { @Test public void testSiteDrfEnabledConversion() { - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true, + false); assertEquals("Resource calculator type", DominantResourceCalculator.class, yarnConvertedConfig.getClass( @@ -152,11 +159,32 @@ public void testSiteDrfEnabledConversion() { @Test public void testSiteDrfDisabledConversion() { - converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false); + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); assertEquals("Resource calculator type", DefaultResourceCalculator.class, yarnConvertedConfig.getClass( CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, CapacitySchedulerConfiguration.DEFAULT_RESOURCE_CALCULATOR_CLASS)); } + + @Test + public void testAsyncSchedulingEnabledConversion() { + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true, + true); + + assertTrue("Asynchronous scheduling", yarnConvertedConfig.getBoolean( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE)); + } + + @Test + public void testAsyncSchedulingDisabledConversion() { + converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, + false); + + assertFalse("Asynchronous scheduling", yarnConvertedConfig.getBoolean( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, + CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE)); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java index 1a644bb4c8d2c..900a8226f11c1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/TestQueuePlacementConverter.java @@ -46,7 +46,7 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; /** * Unit tests for QueuePlacementConverter. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 01fb6a79b4a15..9b3657e00d5a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -33,6 +34,7 @@ import java.util.Map; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -143,10 +145,10 @@ public void tearDown() throws Exception { private NodeManager registerNode(String hostName, int containerManagerPort, int nmHttpPort, String rackName, - Resource capability) + Resource capability, NodeStatus nodeStatus) throws IOException, YarnException { NodeManager nm = new NodeManager(hostName, containerManagerPort, - nmHttpPort, rackName, capability, resourceManager); + nmHttpPort, rackName, capability, resourceManager, nodeStatus); NodeAddedSchedulerEvent nodeAddEvent1 = new NodeAddedSchedulerEvent(resourceManager.getRMContext().getRMNodes() .get(nm.getNodeId())); @@ -406,19 +408,21 @@ public void testFifoScheduler() throws Exception { LOG.info("--- START: testFifoScheduler ---"); final int GB = 1024; - + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node1 String host_0 = "host_0"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + Resources.createResource(4 * GB, 1), mockNodeStatus); nm_0.heartbeat(); // Register node2 String host_1 = "host_1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + Resources.createResource(2 * GB, 1), mockNodeStatus); nm_1.heartbeat(); // ResourceRequest priorities @@ -1197,9 +1201,12 @@ public void testResourceOverCommit() throws Exception { @Test public void testRemovedNodeDecomissioningNode() throws Exception { + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register nodemanager NodeManager nm = registerNode("host_decom", 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); RMNode node = resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); @@ -1242,10 +1249,14 @@ public void handle(Event event) { ((FifoScheduler) resourceManager.getResourceScheduler()) .setRMContext(spyContext); ((AsyncDispatcher) mockDispatcher).start(); + + NodeStatus mockNodeStatus = createMockNodeStatus(); + // Register node 
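The TestFifoScheduler changes that follow (like the TestFairScheduler hunks earlier) thread a mock NodeStatus into node registration. Condensed into one self-contained helper as a sketch, under the assumption that the test class already holds a started ResourceManager (a MockRM) and that the resource values mirror the 8 GB / 4 vcore nodes used above:

// Condensed from the surrounding hunks: scheduler tests now pass a mock NodeStatus
// when registering a node with the test NodeManager helper.
import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus;

import java.io.IOException;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.server.resourcemanager.NodeManager;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.util.resource.Resources;

class NodeRegistrationSketch {
  private final ResourceManager resourceManager; // assumed: the MockRM held by the test

  NodeRegistrationSketch(ResourceManager rm) {
    this.resourceManager = rm;
  }

  NodeManager registerNode(String host, int cmPort, int httpPort)
      throws IOException, YarnException {
    NodeStatus mockNodeStatus = createMockNodeStatus();          // new in this patch
    Resource capability = Resources.createResource(8 * 1024, 4); // 8 GB, 4 vcores
    // The trailing NodeStatus argument is the signature change exercised in these tests.
    return new NodeManager(host, cmPort, httpPort,
        NetworkTopology.DEFAULT_RACK, capability, resourceManager, mockNodeStatus);
  }
}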
String host_0 = "host_0"; NodeManager nm_0 = registerNode(host_0, 1234, 2345, - NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4)); + NetworkTopology.DEFAULT_RACK, Resources.createResource(8 * GB, 4), + mockNodeStatus); // ResourceRequest priorities Priority priority_0 = Priority.newInstance(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java index 8734d9f36f9ec..03b68944b481f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java @@ -23,7 +23,7 @@ import org.apache.hadoop.thirdparty.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.ProtobufRpcEngine2; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; @@ -160,7 +160,7 @@ protected void serviceStart() throws Exception { Configuration conf = getConfig(); // Set RPC engine to protobuf RPC engine RPC.setProtocolEngine(conf, CustomProtocol.class, - ProtobufRpcEngine.class); + ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); BlockingService service = TestRpcServiceProtos.CustomProto @@ -194,7 +194,7 @@ public void testClientToAMTokens() throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); // Set RPC engine to protobuf RPC engine - RPC.setProtocolEngine(conf, CustomProtocol.class, ProtobufRpcEngine.class); + RPC.setProtocolEngine(conf, CustomProtocol.class, ProtobufRpcEngine2.class); UserGroupInformation.setConfiguration(conf); ContainerManagementProtocol containerManager = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 31a87cb71bec4..2856c271f39c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -114,7 +114,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import com.google.common.base.Supplier; +import java.util.function.Supplier; /** * unit test - @@ -196,6 +196,10 @@ public void cancel(Token t, Configuration conf) { private static Configuration conf; DelegationTokenRenewer delegationTokenRenewer; + private MockRM rm; + private MockRM rm1; + private MockRM rm2; + 
private DelegationTokenRenewer localDtr; @BeforeClass public static void setUpClass() throws Exception { @@ -217,6 +221,8 @@ public void setUp() throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); conf.set("override_token_expire_time", "3000"); + conf.setBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, + false); UserGroupInformation.setConfiguration(conf); eventQueue = new LinkedBlockingQueue(); dispatcher = new AsyncDispatcher(eventQueue); @@ -241,13 +247,30 @@ public void setUp() throws Exception { } @After - public void tearDown() { + public void tearDown() throws Exception { try { dispatcher.close(); } catch (IOException e) { LOG.debug("Unable to close the dispatcher. " + e); } delegationTokenRenewer.stop(); + + if (rm != null) { + rm.close(); + rm = null; + } + if (rm1 != null) { + rm1.close(); + rm1 = null; + } + if (rm2 != null) { + rm2.close(); + rm2 = null; + } + if (localDtr != null) { + localDtr.close(); + localDtr = null; + } } private static class MyDelegationTokenSecretManager extends DelegationTokenSecretManager { @@ -369,9 +392,9 @@ static MyToken createTokens(Text renewer) return token1; } - private RMApp submitApp(MockRM rm, Credentials cred, ByteBuffer tokensConf) - throws Exception { - int maxAttempts = rm.getConfig().getInt( + private RMApp submitApp(MockRM mockrm, + Credentials cred, ByteBuffer tokensConf) throws Exception { + int maxAttempts = mockrm.getConfig().getInt( YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.create() @@ -395,7 +418,7 @@ private RMApp submitApp(MockRM rm, Credentials cred, ByteBuffer tokensConf) .withApplicationTimeouts(null) .withTokensConf(tokensConf) .build(); - return MockRMAppSubmitter.submit(rm, data); + return MockRMAppSubmitter.submit(mockrm, data); } @@ -608,6 +631,76 @@ public void testDTRenewalWithNoCancel () throws Exception { token1.renew(conf); } + /** + * Basic idea of the test: + * 1. Verify that YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL = true + * overrides shouldCancelAtEnd + * 2. register a token for 2 seconds with shouldCancelAtEnd = false + * 3. cancel it immediately + * 4. 
check that token was canceled + * @throws IOException + * @throws URISyntaxException + */ + @Test(timeout=60000) + public void testDTRenewalWithNoCancelAlwaysCancel() throws Exception { + Configuration lconf = new Configuration(conf); + lconf.setBoolean(YarnConfiguration.RM_DELEGATION_TOKEN_ALWAYS_CANCEL, + true); + + localDtr = createNewDelegationTokenRenewer(lconf, counter); + RMContext mockContext = mock(RMContext.class); + when(mockContext.getSystemCredentialsForApps()).thenReturn( + new ConcurrentHashMap()); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + when(mockContext.getDelegationTokenRenewer()).thenReturn( + localDtr); + when(mockContext.getDispatcher()).thenReturn(dispatcher); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + localDtr.setDelegationTokenRenewerPoolTracker(false); + localDtr.setRMContext(mockContext); + localDtr.init(lconf); + localDtr.start(); + + MyFS dfs = (MyFS)FileSystem.get(lconf); + LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+lconf.hashCode()); + + Credentials ts = new Credentials(); + MyToken token1 = dfs.getDelegationToken("user1"); + + //to cause this one to be set for renew in 2 secs + Renewer.tokenToRenewIn2Sec = token1; + LOG.info("token="+token1+" should be renewed for 2 secs"); + + String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0"; + ts.addToken(new Text(nn1), token1); + + ApplicationId applicationId = BuilderUtils.newApplicationId(0, 1); + localDtr.addApplicationAsync(applicationId, ts, false, "user", + new Configuration()); + waitForEventsToGetProcessed(localDtr); + localDtr.applicationFinished(applicationId); + waitForEventsToGetProcessed(localDtr); + + int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far + try { + Thread.sleep(6*1000); // sleep 6 seconds, so it has time to renew + } catch (InterruptedException e) {} + LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed); + + // counter and the token should still be the old ones + assertEquals("renew wasn't called as many times as expected", + numberOfExpectedRenewals, Renewer.counter); + + // The token should have been cancelled at this point. Renewal will fail. + try { + token1.renew(lconf); + fail("Renewal of cancelled token should have failed"); + } catch (InvalidToken ite) {} + } + /** * Basic idea of the test: * 0. 
Setup token KEEP_ALIVE @@ -629,8 +722,7 @@ public void testDTKeepAlive1 () throws Exception { lconf.setLong( YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, 1000l); - DelegationTokenRenewer localDtr = - createNewDelegationTokenRenewer(lconf, counter); + localDtr = createNewDelegationTokenRenewer(lconf, counter); RMContext mockContext = mock(RMContext.class); when(mockContext.getSystemCredentialsForApps()).thenReturn( new ConcurrentHashMap()); @@ -710,8 +802,7 @@ public void testDTKeepAlive2() throws Exception { lconf.setLong( YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, 1000l); - DelegationTokenRenewer localDtr = - createNewDelegationTokenRenewer(conf, counter); + localDtr = createNewDelegationTokenRenewer(conf, counter); RMContext mockContext = mock(RMContext.class); when(mockContext.getSystemCredentialsForApps()).thenReturn( new ConcurrentHashMap()); @@ -816,8 +907,7 @@ public void testDTRonAppSubmission() doThrow(new IOException("boom")) .when(tokenx).renew(any(Configuration.class)); // fire up the renewer - final DelegationTokenRenewer dtr = - createNewDelegationTokenRenewer(conf, counter); + localDtr = createNewDelegationTokenRenewer(conf, counter); RMContext mockContext = mock(RMContext.class); when(mockContext.getSystemCredentialsForApps()).thenReturn( new ConcurrentHashMap()); @@ -827,13 +917,14 @@ public void testDTRonAppSubmission() InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234); when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); - dtr.setRMContext(mockContext); - when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr); - dtr.init(conf); - dtr.start(); + localDtr.setRMContext(mockContext); + when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr); + localDtr.init(conf); + localDtr.start(); try { - dtr.addApplicationSync(mock(ApplicationId.class), credsx, false, "user"); + localDtr.addApplicationSync(mock(ApplicationId.class), + credsx, false, "user"); fail("Catch IOException on app submission"); } catch (IOException e){ Assert.assertTrue(e.getMessage().contains(tokenx.toString())); @@ -876,8 +967,8 @@ public Long answer(InvocationOnMock invocation) doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class)); // fire up the renewer - final DelegationTokenRenewer dtr = - createNewDelegationTokenRenewer(conf, counter); + localDtr = createNewDelegationTokenRenewer(conf, counter); + RMContext mockContext = mock(RMContext.class); when(mockContext.getSystemCredentialsForApps()).thenReturn( new ConcurrentHashMap()); @@ -887,24 +978,24 @@ public Long answer(InvocationOnMock invocation) InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234); when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); - dtr.setRMContext(mockContext); - when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr); - dtr.init(conf); - dtr.start(); + localDtr.setRMContext(mockContext); + when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr); + localDtr.init(conf); + localDtr.start(); // submit a job that blocks during renewal Thread submitThread = new Thread() { @Override public void run() { - dtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user", - new Configuration()); + localDtr.addApplicationAsync(mock(ApplicationId.class), + creds1, false, "user", new Configuration()); } }; submitThread.start(); // wait till 1st submit blocks, then submit another startBarrier.await(); - dtr.addApplicationAsync(mock(ApplicationId.class), 
creds2, false, "user", - new Configuration()); + localDtr.addApplicationAsync(mock(ApplicationId.class), + creds2, false, "user", new Configuration()); // signal 1st to complete endBarrier.await(); submitThread.join(); @@ -917,7 +1008,7 @@ public void testAppSubmissionWithInvalidDelegationToken() throws Exception { CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); - MockRM rm = new MockRM(conf) { + rm = new MockRM(conf) { @Override protected void doSecureLogin() throws IOException { // Skip the login. @@ -973,7 +1064,7 @@ public void testReplaceExpiringDelegationToken() throws Exception { new Token(dtId2.getBytes(), "password2".getBytes(), dtId2.getKind(), new Text("service2")); - final MockRM rm = new TestSecurityMockRM(conf, null) { + rm = new TestSecurityMockRM(conf, null) { @Override protected DelegationTokenRenewer createDelegationTokenRenewer() { return new DelegationTokenRenewer() { @@ -1076,7 +1167,7 @@ public void testRMRestartWithExpiredToken() throws Exception { Credentials credentials = new Credentials(); credentials.addToken(userText1, originalToken); - MockRM rm1 = new TestSecurityMockRM(yarnConf); + rm1 = new TestSecurityMockRM(yarnConf); MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore(); rm1.start(); RMApp app = MockRMAppSubmitter.submit(rm1, @@ -1100,7 +1191,7 @@ public void testRMRestartWithExpiredToken() throws Exception { "password2".getBytes(), dtId2.getKind(), new Text("service2")); AtomicBoolean firstRenewInvoked = new AtomicBoolean(false); AtomicBoolean secondRenewInvoked = new AtomicBoolean(false); - MockRM rm2 = new TestSecurityMockRM(yarnConf, memStore) { + rm2 = new TestSecurityMockRM(yarnConf, memStore) { @Override protected DelegationTokenRenewer createDelegationTokenRenewer() { return new DelegationTokenRenewer() { @@ -1110,8 +1201,8 @@ protected void renewToken(final DelegationTokenToRenew dttr) throws IOException { if (dttr.token.equals(updatedToken)) { - secondRenewInvoked.set(true); super.renewToken(dttr); + secondRenewInvoked.set(true); } else if (dttr.token.equals(originalToken)){ firstRenewInvoked.set(true); throw new InvalidToken("Failed to renew"); @@ -1137,6 +1228,9 @@ protected Token[] obtainSystemTokensForUser(String user, final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm2.getResourceTrackerService()); nm1.registerNode(); + + GenericTestUtils.waitFor(() -> secondRenewInvoked.get(), 100, 10000); + NodeHeartbeatResponse response = nm1.nodeHeartbeat(true); NodeHeartbeatResponse proto = new NodeHeartbeatResponsePBImpl( @@ -1168,7 +1262,7 @@ public void testAppSubmissionWithoutDelegationToken() throws Exception { final Token token2 = new Token(dtId2.getBytes(), "password2".getBytes(), dtId2.getKind(), new Text("service2")); - final MockRM rm = new TestSecurityMockRM(conf, null) { + rm = new TestSecurityMockRM(conf, null) { @Override protected DelegationTokenRenewer createDelegationTokenRenewer() { return new DelegationTokenRenewer() { @@ -1220,7 +1314,7 @@ public Boolean get() { // submitted application. 
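A recurring change in this test class is that the MockRM and DelegationTokenRenewer instances are now assigned to the rm, rm1, rm2 and localDtr fields introduced above, so the tearDown() added earlier can stop them even when an assertion fails mid-test. A minimal sketch of that cleanup pattern, assuming JUnit 4 as used in these tests; the class name and the AutoCloseable stand-in are hypothetical, not part of this patch:

    import org.junit.After;
    import org.junit.Test;

    public class CleanupPatternSketch {
      // Promoted from a local variable to a field so tearDown() always sees it.
      private AutoCloseable service;

      @After
      public void tearDown() throws Exception {
        if (service != null) {
          service.close();  // mirrors rm.close() / localDtr.close() in tearDown()
          service = null;
        }
      }

      @Test
      public void testSomething() throws Exception {
        service = () -> System.out.println("stopped");  // hypothetical stand-in for MockRM
        // assertions may throw here; tearDown() still releases the service
      }
    }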
@Test (timeout = 30000) public void testAppSubmissionWithPreviousToken() throws Exception{ - MockRM rm = new TestSecurityMockRM(conf, null); + rm = new TestSecurityMockRM(conf, null); rm.start(); final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); @@ -1296,7 +1390,7 @@ public void testFSLeakInObtainSystemTokensForUser() throws Exception{ // complete @Test (timeout = 30000) public void testCancelWithMultipleAppSubmissions() throws Exception{ - MockRM rm = new TestSecurityMockRM(conf, null); + rm = new TestSecurityMockRM(conf, null); rm.start(); final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); @@ -1411,10 +1505,10 @@ public Boolean get() { Assert.assertFalse(renewer.getDelegationTokens().contains(token1)); } - private void finishAMAndWaitForComplete(final RMApp app, MockRM rm, - MockNM nm, MockAM am, final DelegationTokenToRenew dttr) + private void finishAMAndWaitForComplete(final RMApp app, MockRM mockrm, + MockNM mocknm, MockAM mockam, final DelegationTokenToRenew dttr) throws Exception { - MockRM.finishAMAndVerifyAppState(app, rm, nm, am); + MockRM.finishAMAndVerifyAppState(app, mockrm, mocknm, mockam); GenericTestUtils.waitFor(new Supplier() { public Boolean get() { return !dttr.referringAppIds.contains(app.getApplicationId()); @@ -1430,7 +1524,7 @@ public void testRenewTokenUsingTokenConfProvidedByApp() throws Exception{ "kerberos"); UserGroupInformation.setConfiguration(conf); - MockRM rm = new TestSecurityMockRM(conf, null); + rm = new TestSecurityMockRM(conf, null); rm.start(); final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); @@ -1485,7 +1579,7 @@ public void testTokensConfExceedLimit() throws Exception { UserGroupInformation.setConfiguration(conf); // limit 100 bytes conf.setInt(YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE, 100); - MockRM rm = new TestSecurityMockRM(conf, null); + rm = new TestSecurityMockRM(conf, null); rm.start(); final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); @@ -1548,7 +1642,7 @@ public void testTokenRenewerInvalidReturn() throws Exception { */ @Test public void testShutDown() { - DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter); + localDtr = createNewDelegationTokenRenewer(conf, counter); RMContext mockContext = mock(RMContext.class); when(mockContext.getSystemCredentialsForApps()).thenReturn( new ConcurrentHashMap()); @@ -1558,10 +1652,10 @@ public void testShutDown() { InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234); when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); - dtr.setRMContext(mockContext); - when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr); - dtr.init(conf); - dtr.start(); + localDtr.setRMContext(mockContext); + when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr); + localDtr.init(conf); + localDtr.start(); delegationTokenRenewer.stop(); delegationTokenRenewer.applicationFinished( BuilderUtils.newApplicationId(0, 1)); @@ -1583,7 +1677,7 @@ public void testTokenSequenceNoAfterNewTokenAndRenewal() throws Exception { "password2".getBytes(), dtId1.getKind(), new Text("service2")); // fire up the renewer - final DelegationTokenRenewer dtr = new DelegationTokenRenewer() { + localDtr = new DelegationTokenRenewer() { @Override protected Token[] obtainSystemTokensForUser(String user, final Credentials credentials) throws IOException { @@ -1601,25 +1695,25 @@ protected Token[] 
obtainSystemTokensForUser(String user, InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234); when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); - dtr.setRMContext(mockContext); - when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr); - dtr.init(conf); - dtr.start(); + localDtr.setRMContext(mockContext); + when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr); + localDtr.init(conf); + localDtr.start(); final ApplicationId appId1 = ApplicationId.newInstance(1234, 1); Collection appIds = new ArrayList(1); appIds.add(appId1); - dtr.addApplicationSync(appId1, credsx, false, "user1"); + localDtr.addApplicationSync(appId1, credsx, false, "user1"); // Ensure incrTokenSequenceNo has been called for new token request Mockito.verify(mockContext, Mockito.times(1)).incrTokenSequenceNo(); - DelegationTokenToRenew dttr = new DelegationTokenToRenew(appIds, + DelegationTokenToRenew dttr = localDtr.new DelegationTokenToRenew(appIds, expectedToken, conf, 1000, false, "user1"); - dtr.requestNewHdfsDelegationTokenIfNeeded(dttr); + localDtr.requestNewHdfsDelegationTokenIfNeeded(dttr); // Ensure incrTokenSequenceNo has been called for token renewal as well. Mockito.verify(mockContext, Mockito.times(2)).incrTokenSequenceNo(); @@ -1637,16 +1731,17 @@ protected Token[] obtainSystemTokensForUser(String user, @Test(timeout = 30000) public void testTokenThreadTimeout() throws Exception { Configuration yarnConf = new YarnConfiguration(); + yarnConf.set("override_token_expire_time", "30000"); yarnConf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true); yarnConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); yarnConf.setClass(YarnConfiguration.RM_STORE, MemoryRMStateStore.class, RMStateStore.class); - yarnConf.setTimeDuration(YarnConfiguration.RM_DT_RENEWER_THREAD_TIMEOUT, 5, + yarnConf.setTimeDuration(YarnConfiguration.RM_DT_RENEWER_THREAD_TIMEOUT, 2, TimeUnit.SECONDS); yarnConf.setTimeDuration( - YarnConfiguration.RM_DT_RENEWER_THREAD_RETRY_INTERVAL, 5, + YarnConfiguration.RM_DT_RENEWER_THREAD_RETRY_INTERVAL, 0, TimeUnit.SECONDS); yarnConf.setInt(YarnConfiguration.RM_DT_RENEWER_THREAD_RETRY_MAX_ATTEMPTS, 3); @@ -1670,7 +1765,7 @@ public void testTokenThreadTimeout() throws Exception { DelegationTokenRenewer renewer = createNewDelegationTokenRenewerForTimeout( yarnConf, threadCounter, renewDelay); - MockRM rm = new TestSecurityMockRM(yarnConf) { + rm = new TestSecurityMockRM(yarnConf) { @Override protected DelegationTokenRenewer createDelegationTokenRenewer() { return renewer; @@ -1693,8 +1788,7 @@ protected DelegationTokenRenewer createDelegationTokenRenewer() { YarnConfiguration.RM_DT_RENEWER_THREAD_RETRY_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_DT_RENEWER_THREAD_RETRY_MAX_ATTEMPTS); - GenericTestUtils.waitFor(() -> threadCounter.get() >= attempts, 2000, - 30000); + GenericTestUtils.waitFor(() -> threadCounter.get() >= attempts, 100, 20000); // Ensure no. 
of threads has been used in renewer service thread pool is // higher than the configured max retry attempts @@ -1743,7 +1837,7 @@ public void testTokenThreadTimeoutWithoutDelay() throws Exception { DelegationTokenRenewer renwer = createNewDelegationTokenRenewerForTimeout( yarnConf, threadCounter, renewDelay); - MockRM rm = new TestSecurityMockRM(yarnConf) { + rm = new TestSecurityMockRM(yarnConf) { @Override protected DelegationTokenRenewer createDelegationTokenRenewer() { return renwer; @@ -1807,4 +1901,4 @@ protected void renewToken(final DelegationTokenToRenew dttr) renew.setDelegationTokenRenewerPoolTracker(true); return renew; } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java index 94bf3edfdd18f..5554f69d1a0e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java @@ -26,7 +26,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/TestVolumeProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/TestVolumeProcessor.java index 531d543e7b8c0..f306ffe22b41f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/TestVolumeProcessor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/TestVolumeProcessor.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.volume.csi; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.CsiAdaptorProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java index 3c6db7d470620..922a28a84744a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ActivitiesTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; import org.apache.hadoop.http.JettyUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java index 30e1c62b3e89d..3c840d3f62841 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestApplicationsRequestBuilder.java @@ -16,7 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index 26e8c2ab668d1..9b79938d37238 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -19,12 +19,16 @@ import java.io.IOException; import java.io.PrintWriter; +import java.util.Map; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock; +import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import 
org.apache.hadoop.yarn.webapp.test.WebAppTests; import org.junit.Before; import org.junit.Test; @@ -48,14 +52,18 @@ public class TestNodesPage { // Number of Actual Table Headers for NodesPage.NodesBlock might change in // future. In that case this value should be adjusted to the new value. - private final int numberOfThInMetricsTable = 23; - private final int numberOfActualTableHeaders = 14; + private final int numberOfThInMetricsTable = 22; + private final int numberOfActualTableHeaders = 18; private final int numberOfThForOpportunisticContainers = 4; private Injector injector; @Before public void setUp() throws Exception { + setUpInternal(false); + } + + private void setUpInternal(final boolean useDRC) throws Exception { final RMContext mockRMContext = TestRMWebApp.mockRMContext(3, numberOfRacks, numberOfNodesPerRack, 8 * TestRMWebApp.GiB); @@ -66,7 +74,7 @@ public void setUp() throws Exception { public void configure(Binder binder) { try { binder.bind(ResourceManager.class).toInstance( - TestRMWebApp.mockRm(mockRMContext)); + TestRMWebApp.mockRm(mockRMContext, useDRC)); } catch (IOException e) { throw new IllegalStateException(e); } @@ -101,7 +109,22 @@ public void testNodesBlockRenderForLostNodes() { Mockito.verify(writer, Mockito.times(numberOfThInMetricsTable)) .print(" oldRtMap = + ResourceUtils.getResourceTypes(); + CustomResourceTypesConfigurationProvider. + initResourceTypes(ResourceInformation.GPU_URI); + this.setUpInternal(true); + try { + this.testNodesBlockRenderForLostNodes(); + } finally { + ResourceUtils.initializeResourcesFromResourceInformationMap(oldRtMap); + } + } + @Test public void testNodesBlockRenderForNodeLabelFilterWithNonEmptyLabel() { NodesBlock nodesBlock = injector.getInstance(NodesBlock.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 1a4d6e3a6c221..8120bb21b1eba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -72,7 +72,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import com.google.inject.Binder; import com.google.inject.Injector; import com.google.inject.Module; @@ -247,8 +247,13 @@ public static ResourceManager mockRm(int apps, int racks, int nodes, } public static ResourceManager mockRm(RMContext rmContext) throws IOException { + return mockRm(rmContext, false); + } + + public static ResourceManager mockRm(RMContext rmContext, + boolean useDRC) throws IOException { ResourceManager rm = mock(ResourceManager.class); - ResourceScheduler rs = mockCapacityScheduler(); + ResourceScheduler rs = mockCapacityScheduler(useDRC); ApplicationACLsManager aclMgr = mockAppACLsManager(); ClientRMService clientRMService = mockClientRMService(rmContext); when(rm.getResourceScheduler()).thenReturn(rs); @@ -259,9 +264,14 @@ public static ResourceManager mockRm(RMContext rmContext) throws IOException { } public static CapacityScheduler 
mockCapacityScheduler() throws IOException { + return mockCapacityScheduler(false); + } + + public static CapacityScheduler mockCapacityScheduler(boolean useDRC) + throws IOException { // stolen from TestCapacityScheduler CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); - setupQueueConfiguration(conf); + setupQueueConfiguration(conf, useDRC); CapacityScheduler cs = new CapacityScheduler(); YarnConfiguration yarnConf = new YarnConfiguration(); @@ -313,6 +323,11 @@ public static ClientRMService mockClientRMService(RMContext rmContext) { static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) { + setupQueueConfiguration(conf, false); + } + + static void setupQueueConfiguration(CapacitySchedulerConfiguration conf, + boolean useDRC) { // Define top-level queues conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); @@ -358,6 +373,10 @@ static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) { conf.setCapacity(C11, 15); conf.setCapacity(C12, 45); conf.setCapacity(C13, 40); + if (useDRC) { + conf.set("yarn.scheduler.capacity.resource-calculator", + "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator"); + } } public static ResourceManager mockFifoRm(int apps, int racks, int nodes, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java index 25d8530382e5d..58aabd2a8f3b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import com.google.inject.Binder; import com.google.inject.Injector; import com.google.inject.Module; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java index fe154acea73d5..6373ccaa482c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java @@ -53,8 +53,8 @@ import org.junit.Before; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import 
com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index afc37fa57a088..76b0796eb2957 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -49,7 +49,7 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.JettyUtils; @@ -474,7 +474,7 @@ public void verifyClusterMetricsJSON(JSONObject json) throws JSONException, Exception { assertEquals("incorrect number of elements", 1, json.length()); JSONObject clusterinfo = json.getJSONObject("clusterMetrics"); - assertEquals("incorrect number of elements", 27, clusterinfo.length()); + assertEquals("incorrect number of elements", 31, clusterinfo.length()); verifyClusterMetrics( clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"), clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index bc34466170636..8a4a230a22363 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java index 324a392c7263f..1e856bd412159 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java @@ -20,9 +20,15 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.CAPACITY; +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX; + import java.io.StringReader; import javax.ws.rs.core.MediaType; @@ -79,6 +85,8 @@ private class QueueInfo { int numApplications; String queueName; String state; + boolean isAbsoluteResource; + boolean autoCreateChildQueueEnabled; } private class LeafQueueInfo extends QueueInfo { @@ -131,6 +139,9 @@ private static void setupQueueConfiguration( final String B = CapacitySchedulerConfiguration.ROOT + ".b"; config.setCapacity(B, 89.5f); + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + config.setCapacity(C, "[memory=1024]"); + // Define 2nd-level queues final String A1 = A + ".a1"; final String A2 = A + ".a2"; @@ -155,11 +166,17 @@ private static void setupQueueConfiguration( config.setCapacity(B3, 0.5f); config.setUserLimitFactor(B3, 100.0f); - config.setQueues(A1, new String[] {"a1a", "a1b"}); + config.setQueues(A1, new String[] {"a1a", "a1b", "a1c"}); final String A1A = A1 + ".a1a"; - config.setCapacity(A1A, 85); + config.setCapacity(A1A, 65); final String A1B = A1 + ".a1b"; config.setCapacity(A1B, 15); + final String A1C = A1 + ".a1c"; + config.setCapacity(A1C, 20); + + config.setAutoCreateChildQueueEnabled(A1C, true); + config.setInt(PREFIX + A1C + DOT + AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX + + DOT + CAPACITY, 50); } @Before @@ -290,6 +307,10 @@ public void verifySubQueueXML(Element qElem, String q, WebServicesTestUtils.getXmlInt(qElem, "numApplications"); qi.queueName = WebServicesTestUtils.getXmlString(qElem, "queueName"); qi.state = WebServicesTestUtils.getXmlString(qElem, "state"); + qi.autoCreateChildQueueEnabled = WebServicesTestUtils.getXmlBoolean(qElem, + "autoCreateChildQueueEnabled"); + qi.isAbsoluteResource = WebServicesTestUtils.getXmlBoolean(qElem, + "isAbsoluteResource"); verifySubQueueGeneric(q, qi, parentAbsCapacity, parentAbsMaxCapacity); if (hasSubQueues) { for (int j = 0; j < children.getLength(); j++) { @@ -304,6 +325,14 @@ public void verifySubQueueXML(Element qElem, String q, } } } + } else if (qi.autoCreateChildQueueEnabled) { + assertEquals("queueName doesn't match", "a1c", qi.queueName); + String capacityStr = WebServicesTestUtils.getPropertyValue(qElem, + "leafQueueTemplate", AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX + + DOT + CAPACITY); + int capacity = Integer.parseInt(capacityStr); + assertEquals(AUTO_CREATED_LEAF_QUEUE_TEMPLATE_PREFIX + DOT + + CAPACITY + " doesn't match", 50, capacity); } else { LeafQueueInfo lqi = (LeafQueueInfo) qi; lqi.numActiveApplications = @@ -384,10 +413,10 @@ private void verifyClusterSchedulerGeneric(String type, float usedCapacity, private void verifySubQueue(JSONObject info, String q, float 
parentAbsCapacity, float parentAbsMaxCapacity) throws JSONException, Exception { - int numExpectedElements = 24; + int numExpectedElements = 27; boolean isParentQueue = true; if (!info.has("queues")) { - numExpectedElements = 42; + numExpectedElements = 45; isParentQueue = false; } assertEquals("incorrect number of elements", numExpectedElements, info.length()); @@ -405,7 +434,10 @@ private void verifySubQueue(JSONObject info, String q, verifySubQueueGeneric(q, qi, parentAbsCapacity, parentAbsMaxCapacity); - if (isParentQueue) { + // Separate Condition for Managed Parent Queue + if (qi.queueName.equals("a1c")) { + assertTrue(info.getBoolean("autoCreateChildQueueEnabled")); + } else if (isParentQueue) { JSONArray arr = info.getJSONObject("queues").getJSONArray("queue"); // test subqueues for (int i = 0; i < arr.length(); i++) { @@ -423,6 +455,7 @@ private void verifySubQueue(JSONObject info, String q, assertEquals("0", info.getString("queuePriority")); assertEquals("utilization", info.getString("orderingPolicyInfo")); + assertFalse(info.getBoolean("autoCreateChildQueueEnabled")); } else { Assert.assertEquals("\"type\" field is incorrect", "capacitySchedulerLeafQueueInfo", info.getString("type")); @@ -471,6 +504,14 @@ private void verifySubQueueGeneric(String q, QueueInfo info, + " expected: " + q, qshortName.matches(info.queueName)); assertTrue("state doesn't match", (csConf.getState(q).toString()).matches(info.state)); + if (q.equals("c")) { + assertTrue("c queue is not configured in Absolute resource", + info.isAbsoluteResource); + } else { + assertFalse(info.queueName + + " queue is not configured in Absolute resource", + info.isAbsoluteResource); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java index cf96cfe880eb1..f5874986867ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java @@ -39,8 +39,8 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.http.JettyUtils; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; @@ -76,7 +76,7 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; -import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import com.google.inject.Guice; import com.google.inject.servlet.ServletModule; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index c3f41f62f6f4c..c67c49a361006 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; +import static org.apache.hadoop.yarn.server.resourcemanager.MockNM.createMockNodeStatus; import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseStatusCode; import static org.junit.Assert.assertEquals; @@ -96,7 +97,7 @@ import org.w3c.dom.NodeList; import org.xml.sax.InputSource; -import com.google.common.base.Joiner; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import com.google.inject.Guice; import com.google.inject.Singleton; import com.google.inject.servlet.ServletModule; @@ -241,8 +242,10 @@ private RMNode getRunningRMNode(String host, int port, int memory) { } private void sendStartedEvent(RMNode node) { + NodeStatus mockNodeStatus = createMockNodeStatus(); ((RMNodeImpl) node) - .handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); + .handle(new RMNodeStartedEvent(node.getNodeID(), null, null, + mockNodeStatus)); } private void sendLostEvent(RMNode node) { @@ -858,7 +861,7 @@ public void verifyNodesXML(NodeList nodes, RMNode nm) public void verifyNodeInfo(JSONObject nodeInfo, RMNode nm) throws JSONException, Exception { - assertEquals("incorrect number of elements", 21, nodeInfo.length()); + assertEquals("incorrect number of elements", 23, nodeInfo.length()); JSONObject resourceInfo = nodeInfo.getJSONObject("resourceUtilization"); verifyNodeInfoGeneric(nm, nodeInfo.getString("state"), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java index 4d096604e33ed..bffa5309eb303 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.codehaus.jettison.json.JSONArray; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java index 5ca2301444b8f..4d0f361959939 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.w3c.dom.Document; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java index 19f6ca7fd773e..393a31ea179f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java index 2611a079382ff..45bc46ebc74e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java @@ -18,9 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper; -import 
com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml index 30fce436cb099..ee0f1a94fdf02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml @@ -19,12 +19,12 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server-router - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Router diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java index 0a66241e68ae3..807912bd3e6e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java @@ -43,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * The router is a stateless YARN component which is the entry point to the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java index 6d75471518b36..884e06e4ba04f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.router; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.annotation.Metric; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java index 4cd4a017a99c7..c99fd2a06ad95 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java @@ -97,7 +97,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Extends the {@code AbstractRequestInterceptorClient} class and provides an diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index c4ae6ab4d4b85..a721fe0d8eca2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.router.clientrm; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; @@ -130,7 +130,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Extends the {@code AbstractRequestInterceptorClient} class and provides an diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java index 7a1ae918f28fe..0c2254154f751 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java @@ -116,7 +116,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * RouterClientRMService is a service that runs on each router that can be used diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java index b4140c155c355..14942fdaaaea5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java @@ -59,7 +59,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Extends the {@link AbstractRMAdminRequestInterceptor} class and provides an diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java index 39c5f318adc46..ad8427f2b04ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java @@ -74,7 +74,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * RouterRMAdminService is a service that runs on each router that can be used diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java index b14da6c22c343..82a1bd375dc07 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -101,8 +101,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Extends the {@code AbstractRESTRequestInterceptor} class and provides an diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java index 4c694fb4c85bc..abe6cf0529e7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java @@ -91,7 +91,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.inject.Inject; import com.google.inject.Singleton; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java index 2b3c692e50352..d3c619860a9cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java @@ -132,8 +132,8 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.base.Supplier; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.util.function.Supplier; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.ClientResponse; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml index 7788fac509aa7..b1206eecff03b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml @@ -17,10 +17,10 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT hadoop-yarn-server-sharedcachemanager - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN SharedCacheManager diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java index bcdc46b8b3d42..f93b81e385c07 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java @@ -40,7 +40,7 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics; import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java index ca683f231bd38..d565bdecb1ed2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java @@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; import org.apache.hadoop.yarn.server.sharedcachemanager.webapp.SCMWebServer; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java index d917d9b1f1ac3..d186a5cde17df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java @@ -48,8 +48,8 @@ import org.apache.hadoop.yarn.server.sharedcache.SharedCacheUtil; import org.apache.hadoop.yarn.server.sharedcachemanager.AppChecker; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/SCMStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/SCMStore.java index 9eae556cca8b1..34116effbda2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/SCMStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/SCMStore.java @@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.sharedcachemanager.AppChecker; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** diff --git 
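The hunks above and below all apply the same mechanical rewrite: direct Guava imports are repointed at the relocated copy published as org.apache.hadoop.thirdparty:hadoop-shaded-guava, test-only uses of com.google.common.base.Supplier move to java.util.function.Supplier from the JDK, and the matching pom.xml entries swap the com.google.guava:guava dependency for the shaded artifact. A minimal sketch of the resulting import style (the class ShadedGuavaUsage is hypothetical and exists only to illustrate the pattern; the imports themselves are the ones used throughout this patch):

    import java.util.function.Supplier;

    import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
    import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

    public class ShadedGuavaUsage {
      // The shaded classes keep the upstream Guava API; only the package prefix changes.
      @VisibleForTesting
      static final Supplier<String> POOL_NAME = () -> "demo-pool";

      public static void main(String[] args) {
        Preconditions.checkNotNull(POOL_NAME.get(), "pool name must be set");
        Thread t = new ThreadFactoryBuilder()
            .setNameFormat(POOL_NAME.get() + "-%d")
            .build()
            .newThread(() -> System.out.println("running against the shaded classpath"));
        t.start();
      }
    }

Because the packages are relocated inside hadoop-shaded-guava, code written this way compiles without pulling an unshaded Guava onto the classpath.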
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml index 138d113f1d539..787060749c67c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml @@ -19,10 +19,10 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT hadoop-yarn-server-tests - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Server Tests @@ -118,8 +118,8 @@ hadoop-yarn-api
    - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava org.bouncycastle diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index 01045342fc17b..828b5c43af4cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -41,8 +41,10 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.conf.HAUtil; @@ -97,7 +99,7 @@ import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -171,8 +173,11 @@ public MiniYARNCluster( this.numLocalDirs = numLocalDirs; this.numLogDirs = numLogDirs; this.enableAHS = enableAHS; - String testSubDir = testName.replace("$", ""); - File targetWorkDir = new File("target", testSubDir); + String yarnFolderName = String.format("yarn-%d", Time.monotonicNow()); + File targetWorkDirRoot = GenericTestUtils.getTestDir(getName()); + // make sure that the folder exists + targetWorkDirRoot.mkdirs(); + File targetWorkDir = new File(targetWorkDirRoot, yarnFolderName); try { FileContext.getLocalFSFileContext().delete( new Path(targetWorkDir.getAbsolutePath()), true); @@ -227,6 +232,7 @@ public MiniYARNCluster( * @param numLocalDirs the number of nm-local-dirs per nodemanager * @param numLogDirs the number of nm-log-dirs per nodemanager */ + @SuppressWarnings("deprecation") public MiniYARNCluster( String testName, int numResourceManagers, int numNodeManagers, int numLocalDirs, int numLogDirs) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml index c9fec01446823..10c31639a5997 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml @@ -22,11 +22,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timeline-pluginstorage - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Timeline Plugin Storage @@ -100,8 +100,8 @@ slf4j-api - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java index beff2751d2320..e9957c8a84983 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java @@ -22,8 +22,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.type.TypeFactory; import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LogInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LogInfo.java index fa3c85338e9a1..a4f056d104aa9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LogInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LogInfo.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.MappingIterator; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java index 884b5cd18aafa..0195c9de02381 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/EntityGroupPlugInForTest.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.timeline; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId; import org.apache.hadoop.yarn.util.ConverterUtils; diff --git 
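Beyond the import shading, the MiniYARNCluster hunk above also changes where test clusters keep their working directories: instead of a fixed target/<testName> folder, each cluster instance now creates a uniquely named yarn-<timestamp> directory under GenericTestUtils.getTestDir(getName()), so repeated or concurrent runs of the same test no longer collide. A small sketch of the naming scheme, assuming the same GenericTestUtils and Time helpers used in that hunk (the surrounding class and main method are illustrative only):

    import java.io.File;

    import org.apache.hadoop.test.GenericTestUtils;
    import org.apache.hadoop.util.Time;

    public class WorkDirNamingSketch {
      public static void main(String[] args) {
        // Root directory for this test name, e.g. target/test-dir/MyMiniClusterTest.
        File root = GenericTestUtils.getTestDir("MyMiniClusterTest");
        root.mkdirs();
        // Each cluster instance gets its own yarn-<monotonic-ms> subdirectory.
        File workDir = new File(root, String.format("yarn-%d", Time.monotonicNow()));
        System.out.println("MiniYARNCluster would use: " + workDir.getAbsolutePath());
      }
    }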
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml index 18ccb24354156..c86bf4427d106 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml @@ -19,7 +19,7 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-documentstore diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java index 1df89dee8b86d..6f17a9869535e 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.timelineservice.documentstore.reader.cosmosdb; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.microsoft.azure.cosmosdb.FeedOptions; import com.microsoft.azure.cosmosdb.FeedResponse; import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java index 3296584035768..549fe2b276e99 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.timelineservice.documentstore.writer.cosmosdb; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.microsoft.azure.cosmosdb.AccessCondition; import com.microsoft.azure.cosmosdb.AccessConditionType; import com.microsoft.azure.cosmosdb.Database; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index 0b25e5b113ccd..81d206be9232c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -22,11 +22,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase-tests - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN TimelineService HBase tests @@ -154,6 +154,12 @@ + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + test + + com.google.guava guava @@ -383,8 +389,8 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin true diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java index abd5362c8e779..ad0fd64a7d0e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java @@ -60,8 +60,8 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.ClientResponse.Status; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java index 1148b80d1962a..d83f130338109 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java @@ -181,14 +181,13 @@ private static void waitForHBaseDown(HBaseTimelineReaderImpl htr) throws } } - private static void checkQuery(HBaseTimelineReaderImpl htr) throws - IOException { + private static Set checkQuery(HBaseTimelineReaderImpl htr) + throws IOException { TimelineReaderContext context = new TimelineReaderContext(YarnConfiguration.DEFAULT_RM_CLUSTER_ID, null, null, null, 
null, TimelineEntityType .YARN_FLOW_ACTIVITY.toString(), null, null); - Set entities = htr.getEntities(context, MONITOR_FILTERS, - DATA_TO_RETRIEVE); + return htr.getEntities(context, MONITOR_FILTERS, DATA_TO_RETRIEVE); } private static void configure(HBaseTestingUtility util) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml index 0288bb3e9b6e3..7661a4b067c58 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml @@ -22,7 +22,7 @@ hadoop-yarn-server-timelineservice-hbase org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase-client @@ -50,8 +50,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java index 4263fac62b91d..5acdbe48e9e47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineSchemaCreator.java @@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW; import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java index 29ba1845db954..6b492c3553e99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java @@ -58,7 +58,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils; import org.apache.hadoop.yarn.webapp.BadRequestException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Timeline entity reader for application entities that are stored in the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java index ebe21a4763231..59526f43d2003 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.timelineservice.storage.reader; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java index 7b7eef570156d..04810ac6b2b4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java @@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW; import org.apache.hadoop.yarn.webapp.BadRequestException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Timeline entity reader for flow activity entities that are stored in the diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java index 80d3e9b336361..5b2e642634dd4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java @@ -57,7 +57,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW; import org.apache.hadoop.yarn.webapp.BadRequestException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Timeline entity reader for flow run entities that are stored in the flow run diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java index 6e62f20aa7d9b..8a701e5ffdf7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java @@ -63,7 +63,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW; import org.apache.hadoop.yarn.webapp.BadRequestException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * Timeline entity reader for generic entities that are stored in the entity diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java index 6a91c7b46d027..08651e158c458 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java @@ -56,7 +56,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW; import org.apache.hadoop.yarn.webapp.BadRequestException; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; class SubApplicationEntityReader extends GenericEntityReader { private static final SubApplicationTableRW SUB_APPLICATION_TABLE = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml index 804dfaec8b285..808bbe56a2730 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml @@ -22,13 +22,13 @@ hadoop-yarn-server-timelineservice-hbase org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase-common Apache Hadoop YARN TimelineService HBase Common - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT @@ -85,8 +85,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java index 7d37206bbd1a4..05a3723c1e818 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; -import com.google.common.collect.Iterables; +import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; public class TestSeparator { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml index 57799e7422fee..16f7c98dbacce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml @@ -22,13 +22,13 @@ hadoop-yarn-server-timelineservice-hbase-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase-server-1 Apache Hadoop YARN TimelineService HBase Server 1.2 - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT @@ -55,8 +55,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java index 9ecb6f6023782..471ce4295c911 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java @@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator; import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml index cf6119c74dfe4..3d162327dffa6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml @@ -22,13 +22,13 @@ hadoop-yarn-server-timelineservice-hbase-server org.apache.hadoop - 3.3.0-SNAPSHOT + 
3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase-server-2 Apache Hadoop YARN TimelineService HBase Server 2.0 - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT @@ -55,8 +55,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java index ff8d7ba51ca0d..1dbb5071e4a0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java @@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator; import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml index 0dd25117e7924..b2ebc905b9056 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml @@ -22,12 +22,12 @@ hadoop-yarn-server-timelineservice-hbase org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase-server - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN TimelineService HBase Servers pom diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml index 6574eb80a113d..9d1a0efdd862f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml @@ -22,12 +22,12 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice-hbase - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN TimelineService HBase Backend pom diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml index 10a9f2141a0f4..fb33a520cf3f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml @@ -22,11 +22,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-timelineservice - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Timeline Service @@ -67,8 +67,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java index 38221fe98a8d4..e1109f98df71d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java @@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java index aa041a524dc71..04430817cb302 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.timelineservice.collector; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java index b758da95dadcf..b6de08e679e21 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java @@ -53,8 +53,8 @@ import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java index 82dd793b8b2ae..4ac3f447a1e0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java @@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.server.api.ContainerTerminationContext; import org.apache.hadoop.yarn.server.api.ContainerType; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java index d59b2e072d0df..70cfcbba00db3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java @@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/PerNodeAggTimelineCollectorMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/PerNodeAggTimelineCollectorMetrics.java index 024c61c45fdac..74a61209007d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/PerNodeAggTimelineCollectorMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/PerNodeAggTimelineCollectorMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.timelineservice.metrics; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/TimelineReaderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/TimelineReaderMetrics.java index fbe96aaa5a908..bbd60cbe932d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/TimelineReaderMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/metrics/TimelineReaderMetrics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.timelineservice.metrics; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java index 10265c6586805..81610f14a41d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java @@ -50,7 +50,7 @@ import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java index fff06a0043781..d9233b1c788e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java @@ -24,7 +24,7 @@ import org.apache.commons.lang3.StringUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Set of utility methods to be used across timeline reader. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java index 5f8f33a4bbe1c..16c9ba9023a9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java @@ -60,7 +60,7 @@ import org.apache.hadoop.yarn.webapp.ForbiddenException; import org.apache.hadoop.yarn.webapp.NotFoundException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import com.google.inject.Singleton; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java index ea7c32ea6742e..359a1c57e11ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java @@ -57,7 +57,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils; import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java index 023d496bed565..c0062c7273899 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java @@ -41,7 +41,7 @@ import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java index 20ae8bfe0f1f0..d23236ba09e32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.timelineservice.storage; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.util.ReflectionUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java index 3dc5f514b2e23..2393dcb58d818 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/OfflineAggregationInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.common; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext; import java.sql.PreparedStatement; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java index 766c2cd4bc61d..5ace793e3ecf1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java @@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; import org.mockito.internal.stubbing.answers.AnswersWithDelay; import org.mockito.internal.stubbing.answers.Returns; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java index 1455d1785abf4..834eb87c2478a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java @@ -34,7 +34,7 @@ import org.junit.Assert; import org.junit.Test; -import com.google.common.collect.Sets; +import org.apache.hadoop.thirdparty.com.google.common.collect.Sets; public class TestTimelineReaderWebServicesUtils { private static void verifyFilterList(String expr, TimelineFilterList list, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml index cf5f228fd7385..dbd1e35f17875 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn-server org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server-web-proxy - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Web Proxy @@ -96,8 +96,8 @@ - com.google.guava - guava + org.apache.hadoop.thirdparty + hadoop-shaded-guava org.eclipse.jetty diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java index a44d764cbac5a..3a415eb6970a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.yarn.server.webproxy; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java index 1382fc6ce1c74..f373e94b8a1a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java @@ -31,7 +31,7 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.fs.CommonConfigurationKeys; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java index ae9a01f67388b..0b6bb65d8db34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java @@ -45,7 +45,7 @@ import javax.ws.rs.core.UriBuilder; import javax.ws.rs.core.UriBuilderException; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java index 27742e41a29e2..c023aaebecabd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.webproxy.amfilter; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; 
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; import org.apache.hadoop.http.FilterInitializer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java index be8e10cf44e9e..f282948d68565 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.webproxy.amfilter; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java index 6f1a516a0aa74..e4829d1f13c4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestProxyUriUtils.java @@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.util.TrackingUriPlugin; import org.junit.Test; -import com.google.common.collect.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; public class TestProxyUriUtils { @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java index 021f4a5e327d3..5fcdb62ea33ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java @@ -50,7 +50,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; -import com.google.common.base.Supplier; +import java.util.function.Supplier; import org.apache.hadoop.http.TestHttpServer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.server.webproxy.ProxyUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml index f17e6ef3e44bf..65b1d345c48ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn org.apache.hadoop - 
3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-server - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Server pom diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml index ff460c29e5d64..405f4e796a8f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml @@ -19,11 +19,11 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-site - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN Site pom diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index 1b84c2be1d4e5..71fe287ba6545 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -142,6 +142,23 @@ Configuration |:---- |:---- | | `yarn.scheduler.capacity.maximum-applications` / `yarn.scheduler.capacity..maximum-applications` | Maximum number of applications in the system which can be concurrently active both running and pending. Limits on each queue are directly proportional to their queue capacities and user limits. This is a hard limit and any applications submitted when this limit is reached will be rejected. Default is 10000. This can be set for all queues with `yarn.scheduler.capacity.maximum-applications` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-applications`. Integer value expected. | | `yarn.scheduler.capacity.maximum-am-resource-percent` / `yarn.scheduler.capacity..maximum-am-resource-percent` | Maximum percent of resources in the cluster which can be used to run application masters - controls number of concurrent active applications. Limits on each queue are directly proportional to their queue capacities and user limits. Specified as a float - ie 0.5 = 50%. Default is 10%. This can be set for all queues with `yarn.scheduler.capacity.maximum-am-resource-percent` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..maximum-am-resource-percent` | +| `yarn.scheduler.capacity.max-parallel-apps` / `yarn.scheduler.capacity..max-parallel-apps` | Maximum number of applications that can run at the same time. Unlike to `maximum-applications`, application submissions are *not* rejected when this limit is reached. Instead they stay in `ACCEPTED` state until they are eligible to run. This can be set for all queues with `yarn.scheduler.capacity.max-parallel-apps` and can also be overridden on a per queue basis by setting `yarn.scheduler.capacity..max-parallel-apps`. Integer value is expected. By default, there is no limit. | + + You can also limit the number of parallel applications on a per user basis. + +| Property | Description | +|:---- |:---- | +| `yarn.scheduler.capacity.user.max-parallel-apps` | Maximum number of applications that can run at the same time for all users. Default value is unlimited. | +| `yarn.scheduler.capacity.user..max-parallel-apps` | Maximum number of applications that can run at the same for a specific user. This overrides the global setting. | + + + The evaluation of these limits happens in the following order: + +1. `maximum-applications` check - if the limit is exceeded, the submission is rejected immediately. + +2. 
`max-parallel-apps` check - the submission is accepted, but the application will not transition to `RUNNING` state. It stays in `ACCEPTED` until the queue / user limits are satisfied. + +3. `maximum-am-resource-percent` check - if there are too many Application Masters running, the application stays in `ACCEPTED` state until there is enough room for it. * Queue Administration & Permissions @@ -325,7 +342,7 @@ The `ReservationSystem` is integrated with the `CapacityScheduler` queue hierach | `yarn.scheduler.capacity..reservation-window` | *Optional* parameter representing the time in milliseconds for which the `SharingPolicy` will validate if the constraints in the Plan are satisfied. Long value expected. The default value is one day. | | `yarn.scheduler.capacity..instantaneous-max-capacity` | *Optional* parameter: maximum capacity at any time in percentage (%) as a float that the `SharingPolicy` allows a single user to reserve. The default value is 1, i.e. 100%. | | `yarn.scheduler.capacity..average-capacity` | *Optional* parameter: the average allowed capacity which will aggregated over the *ReservationWindow* in percentage (%) as a float that the `SharingPolicy` allows a single user to reserve. The default value is 1, i.e. 100%. | -| `yarn.scheduler.capacity..reservation-planner` | *Optional* parameter: the class name that will be used to determine the implementation of the *Planner* which will be invoked if the `Plan` capacity fall below (due to scheduled maintenance or node failuers) the user reserved resources. The default value is *org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.SimpleCapacityReplanner* which scans the `Plan` and greedily removes reservations in reversed order of acceptance (LIFO) till the reserved resources are within the `Plan` capacity | +| `yarn.scheduler.capacity..reservation-planner` | *Optional* parameter: the class name that will be used to determine the implementation of the *Planner* which will be invoked if the `Plan` capacity fall below (due to scheduled maintenance or node failures) the user reserved resources. The default value is *org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.SimpleCapacityReplanner* which scans the `Plan` and greedily removes reservations in reversed order of acceptance (LIFO) till the reserved resources are within the `Plan` capacity | | `yarn.scheduler.capacity..reservation-enforcement-window` | *Optional* parameter representing the time in milliseconds for which the `Planner` will validate if the constraints in the Plan are satisfied. Long value expected. The default value is one hour. | ###Dynamic Auto-Creation and Management of Leaf Queues diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md index db9c56d99ee7b..64a6123983d89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md @@ -284,7 +284,7 @@ are allowed. It contains the following properties: | `docker.trusted.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers. By default, no registries are defined. | | `docker.inspect.max.retries` | Integer value to check docker container readiness. Each inspection is set with 3 seconds delay. 
Default value of 10 will wait 30 seconds for docker container to become ready before marked as container failed. | | `docker.no-new-privileges.enabled` | Enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default. | -| `docker.allowed.runtimes` | Comma seperated runtimes that containers are allowed to use. By default no runtimes are allowed to be added.| +| `docker.allowed.runtimes` | Comma separated runtimes that containers are allowed to use. By default no runtimes are allowed to be added.| | `docker.service-mode.enabled` | Set to "true" or "false" to enable or disable docker container service mode. Default value is "false". | Please note that if you wish to run Docker containers that require access to the YARN local directories, you must add them to the docker.allowed.rw-mounts list. @@ -1034,7 +1034,7 @@ node manager environment white list: ``` yarn.nodemanager.env-whitelist - JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME,YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE + JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ,HADOOP_MAPRED_HOME,YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE ``` diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md index d2f3247cd938e..c7836e75475b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md @@ -256,7 +256,7 @@ Optional: |`yarn.router.submit.retry` | `3` | The number of retries in the router before we give up. | |`yarn.federation.statestore.max-connections` | `10` | This is the maximum number of parallel connections each Router makes to the state-store. | |`yarn.federation.cache-ttl.secs` | `60` | The Router caches informations, and this is the time to leave before the cache is invalidated. | -|`yarn.router.webapp.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST` | A comma-seperated list of interceptor classes to be run at the router when interfacing with the client via REST interface. The last step of this pipeline must be the Federation Interceptor REST. | +|`yarn.router.webapp.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST` | A comma-separated list of interceptor classes to be run at the router when interfacing with the client via REST interface. The last step of this pipeline must be the Federation Interceptor REST. | ###ON NMs: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeAttributes.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeAttributes.md index 51280043b708b..b733384c303cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeAttributes.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeAttributes.md @@ -127,7 +127,9 @@ Here is an example for creating a Scheduling Request object with NodeAttribute e .build(); The above SchedulingRequest requests for 1 container on nodes that must satisfy following constraints: + 1. Node attribute *`rm.yarn.io/python`* doesn't exist on the node or it exist but its value is not equal to 3 + 2. 
Node attribute *`rm.yarn.io/java`* must exist on the node and its value is equal to 1.8 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md index 42147b608416c..596a47e4e673d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md @@ -202,4 +202,38 @@ The following settings need to be set in *yarn-site.xml*. yarn.nodemanager.aux-services.mapreduce_shuffle.class org.apache.hadoop.mapred.ShuffleHandler - \ No newline at end of file + + +Prevent Container Logs From Getting Too Big +------------------------------------------- + +This allows a cluster admin to configure a cluster such that a task attempt will be killed if any container log exceeds a configured size. This helps prevent logs from filling disks and also prevent the need to aggregate enormous logs. + +### Configuration + +The following parameters can be used to configure the container log dir sizes. + +| Configuration Name | Allowed Values | Description | +|:---- |:---- |:---- | +| `yarn.nodemanager.container-log-monitor.enable` | true, false | Flag to enable the container log monitor which enforces container log directory size limits. Default is false. | +| `yarn.nodemanager.container-log-monitor.interval-ms` | Positive integer | How often to check the usage of a container's log directories in milliseconds. Default is 60000 ms. | +| `yarn.nodemanager.container-log-monitor.dir-size-limit-bytes` | Long | The disk space limit, in bytes, for a single container log directory. Default is 1000000000. | +| `yarn.nodemanager.container-log-monitor.total-size-limit-bytes` | Long | The disk space limit, in bytes, for all of a container's logs. The default is 10000000000. | + +Scale Heart-beat Interval Based on CPU Utilization +------------------------------------------------- + +This allows a cluster admin to configure a cluster to allow the heart-beat between the Resource Manager and each NodeManager to be scaled based on the CPU utilization of the node compared to the overall CPU utilization of the cluster. + +### Configuration + +The following parameters can be used to configure the heart-beat interval and whether and how it scales. + +| Configuration Name | Allowed Values | Description | +|:---- |:---- |:---- | +| `yarn.resourcemanager.nodemanagers.heartbeat-interval-ms` | Long | Specifies the default heart-beat interval in milliseconds for every NodeManager in the cluster. Default is 1000 ms. | +| `yarn.resourcemanager.nodemanagers.heartbeat-interval-scaling-enable` | true, false | Enables heart-beat interval scaling. If true, The NodeManager heart-beat interval will scale based on the difference between the CPU utilization on the node and the cluster-wide average CPU utilization. Default is false. | +| `yarn.resourcemanager.nodemanagers.heartbeat-interval-min-ms` | Positive Long | If heart-beat interval scaling is enabled, this is the minimum heart-beat interval in milliseconds. Default is 1000 ms. | +| `yarn.resourcemanager.nodemanagers.heartbeat-interval-max-ms` | Positive Long | If heart-beat interval scaling is enabled, this is the maximum heart-beat interval in milliseconds. Default is 1000 ms. 
| +| `yarn.resourcemanager.nodemanagers.heartbeat-interval-speedup-factor` | Positive Float | If heart-beat interval scaling is enabled, this controls the degree of adjustment when speeding up heartbeat intervals. At 1.0, 20% less than the average cluster-wide CPU utilization will result in a 20% decrease in the heartbeat interval. Default is 1.0. | +| `yarn.resourcemanager.nodemanagers.heartbeat-interval-slowdown-factor` | Positive Float | If heart-beat interval scaling is enabled, this controls the degree of adjustment when slowing down heartbeat intervals. At 1.0, 20% greater than the average cluster-wide CPU utilization will result in a 20% increase in the heartbeat interval. Default is 1.0. | diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md index 4b8c2cb559cbc..a30221d7a6ce3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md @@ -338,6 +338,9 @@ The capacity scheduler supports hierarchical queues. This one request will print | numActiveApplications | int | The number of active applications in this queue | | numPendingApplications | int | The number of pending applications in this queue | | numContainers | int | The number of containers being used | +| allocatedContainers | int | The number of allocated containers in this queue | +| reservedContainers | int | The number of reserved containers in this queue | +| pendingContainers | int | The number of pending containers in this queue | | maxApplications | int | The maximum number of applications this queue can have | | maxApplicationsPerUser | int | The maximum number of applications per user this queue can have | | maxActiveApplications | int | The maximum number of active applications this queue can have | diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml index 06e589b133c18..e9fb9e94af951 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml @@ -20,11 +20,11 @@ hadoop-yarn org.apache.hadoop - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT 4.0.0 hadoop-yarn-ui - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop YARN UI ${packagingType} @@ -65,7 +65,6 @@ maven-clean-plugin - 3.0.0 ${keepUIBuildCache} false @@ -185,8 +184,8 @@ install-node-and-yarn - v8.17.0 - v1.21.1 + v10.23.1 + v1.22.5 @@ -203,7 +202,7 @@ generate-resources bower install - install + install --allow-root bower diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-log.js index 3bb241317c2e4..911e04062bcc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-log.js @@ -38,11 +38,19 @@ export default RESTAbstractAdapter.extend({ var containerId = splits[0]; var logFile = splits[1]; var url = this._buildURL(); - url = url + '/containerlogs/' + containerId + '/' + logFile; + url = url + '/containerlogs/' + containerId + '/' + logFile + '?manual_redirection=true'; Ember.Logger.info('The URL for getting the log: ' + url); return url; }, + handleResponse(status, 
headers, payload, requestData) { + if (headers['location'] !== undefined && headers['location'] !== null) { + return { redirectedUrl: headers.location, data: "" } + } else { + return { data: payload } + } + }, + /** * Override options so that result is not expected to be JSON */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-redirect-log.js new file mode 100644 index 0000000000000..0f7e47d649632 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-jhs-redirect-log.js @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import Converter from 'yarn-ui/utils/converter'; +import RESTAbstractAdapter from './restabstract'; + +/** + * REST URL's response when fetching container logs will be + * in plain text format and not JSON. + */ +export default RESTAbstractAdapter.extend({ + headers: { + Accept: 'text/plain' + }, + + urlForFindRecord(id/*, modelName, snapshot*/) { + var splits = Converter.splitForAppLogs(id); + var url = splits[0]; + return url; + }, + + /** + * Override options so that result is not expected to be JSON + */ + ajaxOptions: function (url, type, options) { + var hash = options || {}; + hash.url = url; + hash.type = type; + // Make sure jQuery does not try to convert response to JSON. 
+ hash.dataType = 'text'; + hash.context = this; + + var headers = Ember.get(this, 'headers'); + if (headers !== undefined) { + hash.beforeSend = function (xhr) { + Object.keys(headers).forEach(function (key) { + return xhr.setRequestHeader(key, headers[key]); + }); + }; + } + return hash; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js index 07bf613049483..e4ee6d11737a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-log.js @@ -42,11 +42,19 @@ export default RESTAbstractAdapter.extend({ clusterId = splits[2]; } var url = this._buildURL(); - url = url + '/containers/' + containerId + '/logs/' + logFile + '?clusterid=' + clusterId; + url = url + '/containers/' + containerId + '/logs/' + logFile + '?clusterid=' + clusterId + '?manual_redirection=true'; Ember.Logger.info('The URL for getting the log: ' + url); return url; }, + handleResponse(status, headers, payload, requestData) { + if (headers['location'] !== undefined && headers['location'] !== null) { + return { redirectedUrl: headers.location, data: "" } + } else { + return { data: payload } + } + }, + /** * Override options so that result is not expected to be JSON */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-redirect-log.js new file mode 100644 index 0000000000000..ba6accb2fdd18 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-redirect-log.js @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import Converter from 'yarn-ui/utils/converter'; +import RESTAbstractAdapter from './restabstract'; + +/** + * REST URL's response when fetching container logs will be + * in plain text format and not JSON. + */ +export default RESTAbstractAdapter.extend({ + + headers: { + Accept: 'text/plain' + }, + + urlForFindRecord(id/*, modelName, snapshot*/) { + var splits = Converter.splitForAppLogs(id); + var url = splits[0]; + return url; + }, + + /** + * Override options so that result is not expected to be JSON + */ + ajaxOptions: function (url, type, options) { + var hash = options || {}; + hash.url = url; + hash.type = type; + // Make sure jQuery does not try to convert response to JSON. 
+ hash.dataType = 'text'; + hash.context = this; + + var headers = Ember.get(this, 'headers'); + if (headers !== undefined) { + hash.beforeSend = function (xhr) { + Object.keys(headers).forEach(function (key) { + return xhr.setRequestHeader(key, headers[key]); + }); + }; + } + return hash; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-log.js index b0c278492316d..97fe57bb9585a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-log.js @@ -17,6 +17,7 @@ */ import AbstractAdapter from './abstract'; +import { createEmptyContainerLogInfo } from 'yarn-ui/helpers/log-adapter-helper'; export default AbstractAdapter.extend({ address: "jhsAddress", @@ -27,6 +28,15 @@ export default AbstractAdapter.extend({ var url = this._buildURL(); var containerId = query['containerId']; delete query.containerId; - return url + '/containers/' + containerId + '/logs'; + return url + '/containers/' + containerId + '/logs' + '?manual_redirection=true'; + }, + + handleResponse(status, headers, payload, requestData) { + if (headers['location'] !== undefined && headers['location'] !== null) { + return createEmptyContainerLogInfo(headers['location']); + } else { + return payload; + } } + }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-redirect-log.js new file mode 100644 index 0000000000000..b3bb66e8a471e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-jhs-redirect-log.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import AbstractAdapter from './abstract'; + +export default AbstractAdapter.extend({ + address: "jhsAddress", + restNameSpace: "jhs", + serverName: "JHS", + + urlForQuery(url/*, modelName*/) { + return url; + } + +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js index 66f34061546e3..171dda740dff6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js @@ -18,6 +18,19 @@ import AbstractAdapter from './abstract'; +function createEmptyContainerLogInfo(location) { + return { + containerLogsInfo: { + containerLogInfo: [{ + fileName: "", + fileSize: "", + lastModifiedTime: "", + redirectedUrl: location + }] + } + }; +} + export default AbstractAdapter.extend({ address: "timelineWebAddress", restNameSpace: "timelineV2Log", @@ -28,6 +41,15 @@ export default AbstractAdapter.extend({ var containerId = query['containerId']; var clusterId = this.get("env.app.clusterId"); delete query.containerId; - return url + '/containers/' + containerId + '/logs' + '?clusterid=' + clusterId; + return url + '/containers/' + containerId + '/logs' + '?clusterid=' + clusterId + '?manual_redirection=true'; + }, + + handleResponse(status, headers, payload, requestData) { + if (headers['location'] !== undefined && headers['location'] !== null) { + return createEmptyContainerLogInfo(headers['location']); + } else { + return payload; + } } + }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-redirect-log.js new file mode 100644 index 0000000000000..7c2b81e025b75 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-redirect-log.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import AbstractAdapter from './abstract'; + +export default AbstractAdapter.extend({ + address: "timelineWebAddress", + restNameSpace: "timelineV2Log", + serverName: "ATS", + + urlForQuery(url/*, modelName*/) { + return url; + } + +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js index b92890a42a880..4a5a444ee6267 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/logs.js @@ -235,26 +235,54 @@ export default Ember.Controller.extend({ fetchLogFilesForContainerId(containerId) { let queryName = this.fallbackToJHS ? "yarn-jhs-log" : "yarn-log"; + let redirectQuery = queryName === "yarn-jhs-log" ? "yarn-jhs-redirect-log" : "yarn-redirect-log"; return Ember.RSVP.hash({ - logs: this.store - .query(queryName, { - containerId: containerId - }) - .catch(function() { - return Ember.A(); + logs: this.resolveRedirectableQuery( + this.store.query(queryName, { containerId }), + m => { + return m.map(model => model.get('redirectedUrl'))[0]; + }, + url => { + return this.store.query(redirectQuery, url); }) }); }, fetchContentForLogFile(id) { let queryName = this.fallbackToJHS ? 'yarn-app-jhs-log' : 'yarn-app-log'; + let redirectQuery = queryName === "yarn-app-jhs-log" ? "yarn-app-jhs-redirect-log" : "yarn-app-redirect-log"; return Ember.RSVP.hash({ - logs: this.store.findRecord(queryName, id) + logs: this.resolveRedirectableQuery( + this.store.findRecord(queryName, id), + m => { + return m.get('redirectedUrl'); + }, + url => { + return this.store.findRecord(redirectQuery, url + Constants.PARAM_SEPARATOR + id); + }) }); }, + resolveRedirectableQuery(initial, urlResolver, redirectResolver) { + return initial.then(m => { + let redirectedUrl = urlResolver(m); + if (redirectedUrl !== null && redirectedUrl !== undefined && redirectedUrl !== '') { + let logFromRedirect = redirectResolver(redirectedUrl); + return Promise.all([m, logFromRedirect]); + } else { + return Promise.all([m, null]); + } + }) + .then(([originalLog, logFromRedirect]) => { + return logFromRedirect !== null ? logFromRedirect : originalLog; + }) + .catch(function () { + return Ember.A(); + }); + }, + resetAfterRefresh() { this.set("selectedAttemptId", ""); this.set("attemptContainerList", null); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-adapter-helper.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-adapter-helper.js new file mode 100644 index 0000000000000..616a38b3f1c74 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-adapter-helper.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export function createEmptyContainerLogInfo(location) { + return { + containerLogsInfo: { + containerLogInfo: [{ + fileName: "", + fileSize: "", + lastModifiedTime: "", + redirectedUrl: location + }] + } + }; +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-log.js index 31cf61ecbcd7f..87b9fa69d8831 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-log.js @@ -19,7 +19,8 @@ import DS from 'ember-data'; export default DS.Model.extend({ - logs: DS.attr('string'), - containerID: DS.attr('string'), - logFileName: DS.attr('string') + logs: DS.attr('string', {defaultValue: ''}), + containerID: DS.attr('string', {defaultValue: ''}), + logFileName: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}), }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-redirect-log.js new file mode 100644 index 0000000000000..87b9fa69d8831 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-jhs-redirect-log.js @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + logs: DS.attr('string', {defaultValue: ''}), + containerID: DS.attr('string', {defaultValue: ''}), + logFileName: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}), +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js index 31cf61ecbcd7f..87b9fa69d8831 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-log.js @@ -19,7 +19,8 @@ import DS from 'ember-data'; export default DS.Model.extend({ - logs: DS.attr('string'), - containerID: DS.attr('string'), - logFileName: DS.attr('string') + logs: DS.attr('string', {defaultValue: ''}), + containerID: DS.attr('string', {defaultValue: ''}), + logFileName: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}), }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-redirect-log.js new file mode 100644 index 0000000000000..87b9fa69d8831 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-redirect-log.js @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + logs: DS.attr('string', {defaultValue: ''}), + containerID: DS.attr('string', {defaultValue: ''}), + logFileName: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}), +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-log.js index f022bc718b548..ab96c075d404f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-log.js @@ -19,9 +19,10 @@ import DS from 'ember-data'; export default DS.Model.extend({ - fileName: DS.attr('string'), - fileSize: DS.attr('string'), - lastModifiedTime: DS.attr('string'), - containerId: DS.attr('string'), - nodeId: DS.attr('string') + fileName: DS.attr('string', {defaultValue: ''}), + fileSize: DS.attr('string', {defaultValue: ''}), + lastModifiedTime: DS.attr('string', {defaultValue: ''}), + containerId: DS.attr('string', {defaultValue: ''}), + nodeId: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}) }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-redirect-log.js new file mode 100644 index 0000000000000..ab96c075d404f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-jhs-redirect-log.js @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + fileName: DS.attr('string', {defaultValue: ''}), + fileSize: DS.attr('string', {defaultValue: ''}), + lastModifiedTime: DS.attr('string', {defaultValue: ''}), + containerId: DS.attr('string', {defaultValue: ''}), + nodeId: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-log.js index f022bc718b548..ab96c075d404f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-log.js @@ -19,9 +19,10 @@ import DS from 'ember-data'; export default DS.Model.extend({ - fileName: DS.attr('string'), - fileSize: DS.attr('string'), - lastModifiedTime: DS.attr('string'), - containerId: DS.attr('string'), - nodeId: DS.attr('string') + fileName: DS.attr('string', {defaultValue: ''}), + fileSize: DS.attr('string', {defaultValue: ''}), + lastModifiedTime: DS.attr('string', {defaultValue: ''}), + containerId: DS.attr('string', {defaultValue: ''}), + nodeId: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}) }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-redirect-log.js new file mode 100644 index 0000000000000..ab96c075d404f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-redirect-log.js @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + fileName: DS.attr('string', {defaultValue: ''}), + fileSize: DS.attr('string', {defaultValue: ''}), + lastModifiedTime: DS.attr('string', {defaultValue: ''}), + containerId: DS.attr('string', {defaultValue: ''}), + nodeId: DS.attr('string', {defaultValue: ''}), + redirectedUrl: DS.attr('string', {defaultValue: ''}) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-log.js index 3cfabd38186c2..716526cdc66df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-log.js @@ -28,9 +28,10 @@ export default DS.JSONAPISerializer.extend({ id: id, type: primaryModelClass.modelName, attributes: { - logs: payload, + logs: payload.data, containerID: splits[0], - logFileName: splits[1] + logFileName: splits[1], + redirectedUrl: payload.redirectedUrl } }; return { data: convertedPayload }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-redirect-log.js new file mode 100644 index 0000000000000..8660c51e67ff2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-jhs-redirect-log.js @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; +import Converter from 'yarn-ui/utils/converter'; + +export default DS.JSONAPISerializer.extend({ + normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) { + var splits = Converter.splitForAppLogs(id); + var convertedPayload = { + id: id, + type: primaryModelClass.modelName, + attributes: { + logs: payload, + containerID: splits[1], + logFileName: splits[2] + } + }; + return { data: convertedPayload }; + }, +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js index 3cfabd38186c2..716526cdc66df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-log.js @@ -28,9 +28,10 @@ export default DS.JSONAPISerializer.extend({ id: id, type: primaryModelClass.modelName, attributes: { - logs: payload, + logs: payload.data, containerID: splits[0], - logFileName: splits[1] + logFileName: splits[1], + redirectedUrl: payload.redirectedUrl } }; return { data: convertedPayload }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-redirect-log.js new file mode 100644 index 0000000000000..8660c51e67ff2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-redirect-log.js @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; +import Converter from 'yarn-ui/utils/converter'; + +export default DS.JSONAPISerializer.extend({ + normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) { + var splits = Converter.splitForAppLogs(id); + var convertedPayload = { + id: id, + type: primaryModelClass.modelName, + attributes: { + logs: payload, + containerID: splits[1], + logFileName: splits[2] + } + }; + return { data: convertedPayload }; + }, +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-log.js index c785f7bbc96f4..f9db7b9e4b006 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-log.js @@ -26,6 +26,7 @@ export default DS.JSONAPISerializer.extend({ fileName: payload.fileName, fileSize: payload.fileSize, lastModifiedTime: payload.lastModifiedTime, + redirectedUrl: payload.redirectedUrl, containerId: containerId, nodeId: nodeId } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-redirect-log.js new file mode 100644 index 0000000000000..f9db7b9e4b006 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-redirect-log.js @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+  internalNormalizeSingleResponse(store, primaryModelClass, payload, containerId, nodeId) {
+    var fixedPayload = {
+      id: "yarn_log_" + payload.fileName + "_" + Date.now(),
+      type: primaryModelClass.modelName,
+      attributes: {
+        fileName: payload.fileName,
+        fileSize: payload.fileSize,
+        lastModifiedTime: payload.lastModifiedTime,
+        redirectedUrl: payload.redirectedUrl,
+        containerId: containerId,
+        nodeId: nodeId
+      }
+    };
+    return fixedPayload;
+  },
+
+  normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
+    var normalizedArrayResponse = {
+      data: []
+    };
+    // If JSON payload is an object with a containerLogsInfo property
+    if (payload && payload.containerLogsInfo && payload.containerLogsInfo.containerLogInfo) {
+      normalizedArrayResponse.data = payload.containerLogsInfo.containerLogInfo.map((single_payload) => {
+        return this.internalNormalizeSingleResponse(store, primaryModelClass, single_payload,
+          payload.containerLogsInfo.containerId, payload.containerLogsInfo.nodeId);
+      });
+    }
+    // If JSON payload is an array
+    if (payload && payload[0] && payload[0].containerLogInfo) {
+      normalizedArrayResponse.data = payload[0].containerLogInfo.map((single_payload) => {
+        return this.internalNormalizeSingleResponse(store, primaryModelClass, single_payload,
+          payload[0].containerId, payload[0].nodeId);
+      });
+    }
+    return normalizedArrayResponse;
+  }
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-redirect-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-redirect-log.js
new file mode 100644
index 0000000000000..c41170870fa69
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-redirect-log.js
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + internalNormalizeSingleResponse(store, primaryModelClass, payload, containerId, nodeId) { + var fixedPayload = { + id: "yarn_log_" + payload.fileName + "_" + Date.now(), + type: primaryModelClass.modelName, + attributes: { + fileName: payload.fileName, + fileSize: payload.fileSize, + lastModifiedTime: payload.lastModifiedTime, + containerId: containerId, + nodeId: nodeId + } + }; + return fixedPayload; + }, + + normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) { + var normalizedArrayResponse = { + data: [] + }; + if (payload && payload.containerLogsInfo && payload.containerLogsInfo.containerLogInfo) { + normalizedArrayResponse.data = payload.containerLogsInfo.containerLogInfo.map((paylog) => { + return this.internalNormalizeSingleResponse(store, primaryModelClass, paylog, + payload.containerLogsInfo.containerId, payload.containerLogsInfo.nodeId); + }); + } + if (payload && payload[0] && payload[0].containerLogInfo) { + normalizedArrayResponse.data = payload[0].containerLogInfo.map((paylog) => { + return this.internalNormalizeSingleResponse(store, primaryModelClass, paylog, + payload[0].containerId, payload[0].nodeId); + }); + } + return normalizedArrayResponse; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index 1e222cad94e14..f0c1b61b18973 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -16,11 +16,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../../hadoop-project hadoop-yarn - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT pom Apache Hadoop YARN @@ -39,10 +39,9 @@ - org.codehaus.mojo - findbugs-maven-plugin + com.github.spotbugs + spotbugs-maven-plugin - true true ${yarn.basedir}/dev-support/findbugs-exclude.xml Max @@ -128,7 +127,7 @@ - javadoc + javadoc-no-fork prepare-package diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index a979cc8a6f987..b37de1c7b8a12 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -18,11 +18,11 @@ org.apache.hadoop hadoop-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT ../hadoop-project hadoop-yarn-project - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT pom Apache Hadoop YARN Project https://hadoop.apache.org/yarn/ @@ -204,12 +204,9 @@ - org.codehaus.mojo - findbugs-maven-plugin - - 2.3.2 + com.github.spotbugs + spotbugs-maven-plugin - true true diff --git a/pom.xml b/pom.xml index fa1e98423e4b1..c61d46a9babc9 100644 --- a/pom.xml +++ b/pom.xml @@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 4.0.0 org.apache.hadoop hadoop-main - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT Apache Hadoop Main Apache Hadoop Main pom @@ -80,7 +80,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x - 3.3.0-SNAPSHOT + 3.3.1-SNAPSHOT apache.snapshots.https Apache Development Snapshot Repository @@ -113,6 +113,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 3.1.0 8.29 1.4.3 + 4.2.2 + 4.2.0 bash @@ -342,6 +344,18 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x dependency-check-maven ${dependency-check-maven.version} + + com.github.spotbugs + spotbugs-maven-plugin + ${spotbugs-maven-plugin.version} + + + com.github.spotbugs + spotbugs + ${spotbugs.version} + + + @@ -443,6 +457,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x 
         <artifactId>dependency-check-maven</artifactId>
         <version>${dependency-check-maven.version}</version>
       </plugin>
+      <plugin>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
+      </plugin>
    diff --git a/start-build-env.sh b/start-build-env.sh index fc54c7a9985d1..2ee44de41aa6e 100755 --- a/start-build-env.sh +++ b/start-build-env.sh @@ -66,13 +66,16 @@ if [ "$(uname -s)" = "Linux" ]; then fi fi +# Set the home directory in the Docker container. +DOCKER_HOME_DIR=${DOCKER_HOME_DIR:-/home/${USER_NAME}} + docker build -t "hadoop-build-${USER_ID}" - < "/etc/sudoers.d/hadoop-build-${USER_ID}" -ENV HOME /home/${USER_NAME} +ENV HOME "${DOCKER_HOME_DIR}" UserSpecificDocker @@ -85,9 +88,9 @@ DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"} # system. And this also is a significant speedup in subsequent # builds because the dependencies are downloaded only once. docker run --rm=true $DOCKER_INTERACTIVE_RUN \ - -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \ - -w "/home/${USER_NAME}/hadoop" \ - -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \ - -v "${HOME}/.gnupg:/home/${USER_NAME}/.gnupg${V_OPTS:-}" \ + -v "${PWD}:${DOCKER_HOME_DIR}/hadoop${V_OPTS:-}" \ + -w "${DOCKER_HOME_DIR}/hadoop" \ + -v "${HOME}/.m2:${DOCKER_HOME_DIR}/.m2${V_OPTS:-}" \ + -v "${HOME}/.gnupg:${DOCKER_HOME_DIR}/.gnupg${V_OPTS:-}" \ -u "${USER_ID}" \ "hadoop-build-${USER_ID}" "$@"